diff --git a/.coveragerc b/.coveragerc index dc80977522..23c93dc22a 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,5 +1,6 @@ [run] branch = True +disable_warnings = module-not-measured source = src/borg omit = */borg/__init__.py diff --git a/.editorconfig b/.editorconfig index 81d95c4399..5ce940b00e 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,4 +1,4 @@ -# EditorConfig is awesome: http://EditorConfig.org +# EditorConfig is awesome: https://editorconfig.org/ root = true diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000..35caf585e4 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,142 @@ +# badge: https://github.com/borgbackup/borg/workflows/CI/badge.svg?branch=master + +name: CI + +on: + push: + branches: [ 1.1-maint ] + paths: + - '**.py' + - '**.pyx' + - '**.c' + - '**.h' + - '**.yml' + - '**.cfg' + - '**.ini' + - 'requirements.d/*' + - '!docs/**' + pull_request: + branches: [ 1.1-maint ] + paths: + - '**.py' + - '**.pyx' + - '**.c' + - '**.h' + - '**.yml' + - '**.cfg' + - '**.ini' + - 'requirements.d/*' + - '!docs/**' + +jobs: + lint: + + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Lint with flake8 + run: | + pip install flake8 + flake8 src scripts conftest.py + + pytest: + + needs: lint + strategy: + matrix: + include: + - os: ubuntu-20.04 + python-version: '3.5' + toxenv: py35 + - os: ubuntu-20.04 + python-version: '3.6' + toxenv: py36 + - os: ubuntu-20.04 + python-version: '3.7' + toxenv: py37 + - os: ubuntu-20.04 + python-version: '3.8' + toxenv: py38 + - os: ubuntu-20.04 + python-version: '3.9' + toxenv: py39 + - os: ubuntu-20.04 + python-version: '3.10-dev' + toxenv: py310 + - os: macos-latest + # note: it seems that 3.8 and 3.9 are currently broken, + # neverending RuntimeError crashes... 
+ python-version: '3.7' + toxenv: py37 + + env: + # Configure pkg-config to use OpenSSL from Homebrew + PKG_CONFIG_PATH: /usr/local/opt/openssl@1.1/lib/pkgconfig + TOXENV: ${{ matrix.toxenv }} + + runs-on: ${{ matrix.os }} + timeout-minutes: 40 + + steps: + - uses: actions/checkout@v2 + with: + # just fetching 1 commit is not enough for setuptools-scm, so we fetch all + fetch-depth: 0 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Cache pip + uses: actions/cache@v2 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-pip-${{ hashFiles('requirements.d/development.txt') }} + restore-keys: | + ${{ runner.os }}-pip- + ${{ runner.os }}- + + - name: Install Linux packages + if: ${{ runner.os == 'Linux' }} + run: | + sudo apt-get update + sudo apt-get install -y libssl-dev libacl1-dev liblz4-dev libzstd-dev pkg-config build-essential + sudo apt-get install -y libxxhash-dev || true + sudo apt-get install -y libb2-dev || true + sudo apt-get install -y libfuse-dev fuse || true # Required for Python llfuse module + + - name: Install macOS packages + if: ${{ runner.os == 'macOS' }} + run: | + brew install pkg-config || brew upgrade pkg-config + brew install zstd || brew upgrade zstd + brew install lz4 || brew upgrade lz4 + brew install openssl@1.1 || brew upgrade openssl@1.1 + brew install homebrew/cask/osxfuse || brew upgrade homebrew/cask/osxfuse # Required for Python llfuse module + + - name: Install Python requirements + run: | + python -m pip install --upgrade pip setuptools wheel + pip install -r requirements.d/development.txt + - name: Install borgbackup + run: | + # pip install -e . + python setup.py -v develop + - name: run pytest via tox + run: | + # do not use fakeroot, but run as root. avoids the dreaded EISDIR sporadic failures. see #2482. 
+ #sudo -E bash -c "tox -e py" + tox --skip-missing-interpreters + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + env: + OS: ${{ runner.os }} + python: ${{ matrix.python-version }} + with: + token: ${{ secrets.CODECOV_TOKEN }} + env_vars: OS, python diff --git a/.gitignore b/.gitignore index 26af5221f0..f0ec2c0bdc 100644 --- a/.gitignore +++ b/.gitignore @@ -13,7 +13,10 @@ src/borg/algorithms/checksums.c src/borg/platform/darwin.c src/borg/platform/freebsd.c src/borg/platform/linux.c +src/borg/platform/syncfilerange.c src/borg/platform/posix.c +src/borg/algorithms/msgpack/_packer.cpp +src/borg/algorithms/msgpack/_unpacker.cpp src/borg/_version.py *.egg-info *.pyc @@ -21,6 +24,7 @@ src/borg/_version.py *.so .idea/ .cache/ +.vscode/ borg.build/ borg.dist/ borg.exe diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index e12266b9dc..0000000000 --- a/.travis.yml +++ /dev/null @@ -1,68 +0,0 @@ -sudo: required - -language: python - -cache: - directories: - - $HOME/.cache/pip - -# note: use py 3.5.2, it has lzma support. 3.5(.0) on travis.org/trusty does not. -matrix: - include: - - python: 3.4 - os: linux - dist: trusty - env: TOXENV=py34 - - python: 3.5.2 - os: linux - dist: trusty - env: TOXENV=py35 - - python: 3.6 - os: linux - dist: trusty - env: TOXENV=py36 - - python: 3.4 - os: linux - dist: trusty - env: TOXENV=flake8 - - python: "3.6-dev" - os: linux - dist: trusty - env: TOXENV=py36 - - language: generic - os: osx - osx_image: xcode6.4 - env: TOXENV=py34 - - language: generic - os: osx - osx_image: xcode6.4 - env: TOXENV=py35 - - language: generic - os: osx - osx_image: xcode6.4 - env: TOXENV=py36 - -before_install: -- | - echo Checking whether $TRAVIS_COMMIT_RANGE changed only docs - git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(AUTHORS|README\.rst|^(docs)/)' || { - echo "Only docs were updated, stopping build process." 
- exit - } - -install: - - git fetch --unshallow --tags - - ./.travis/install.sh - -script: - - ./.travis/run.sh - -after_success: - - ./.travis/upload_coverage.sh - -notifications: - irc: - channels: - - "irc.freenode.org#borgbackup" - use_notice: true - skip_join: true diff --git a/.travis/install.sh b/.travis/install.sh deleted file mode 100755 index 708b8c81b1..0000000000 --- a/.travis/install.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -set -e -set -x - -if [[ "$(uname -s)" == 'Darwin' ]]; then - brew update || brew update - - if [[ "${OPENSSL}" != "0.9.8" ]]; then - brew outdated openssl || brew upgrade openssl - fi - - if which pyenv > /dev/null; then - eval "$(pyenv init -)" - fi - - brew install lz4 - brew install xz # required for python lzma module - brew outdated pyenv || brew upgrade pyenv - brew install pkg-config - brew install Caskroom/cask/osxfuse - - case "${TOXENV}" in - py34) - pyenv install 3.4.5 - pyenv global 3.4.5 - ;; - py35) - pyenv install 3.5.2 - pyenv global 3.5.2 - ;; - py36) - pyenv install 3.6.0 - pyenv global 3.6.0 - ;; - esac - pyenv rehash - python -m pip install --user virtualenv -else - pip install virtualenv - sudo apt-get update - sudo apt-get install -y fakeroot - sudo apt-get install -y liblz4-dev - sudo apt-get install -y libacl1-dev - sudo apt-get install -y libfuse-dev fuse pkg-config # optional, for FUSE support -fi - -python -m virtualenv ~/.venv -source ~/.venv/bin/activate -pip install -r requirements.d/development.txt -pip install codecov -python setup.py --version -pip install -e .[fuse] diff --git a/.travis/run.sh b/.travis/run.sh deleted file mode 100755 index 7c1e847c12..0000000000 --- a/.travis/run.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -e -set -x - -if [[ "$(uname -s)" == "Darwin" ]]; then - eval "$(pyenv init -)" - if [[ "${OPENSSL}" != "0.9.8" ]]; then - # set our flags to use homebrew openssl - export ARCHFLAGS="-arch x86_64" - export LDFLAGS="-L/usr/local/opt/openssl/lib" - export 
CFLAGS="-I/usr/local/opt/openssl/include" - fi -fi - -source ~/.venv/bin/activate - -if [[ "$(uname -s)" == "Darwin" ]]; then - # no fakeroot on OS X - sudo tox -e $TOXENV -r -else - fakeroot -u tox -r -fi diff --git a/.travis/upload_coverage.sh b/.travis/upload_coverage.sh deleted file mode 100755 index d0b545244c..0000000000 --- a/.travis/upload_coverage.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -e -set -x - -NO_COVERAGE_TOXENVS=(pep8) -if ! [[ "${NO_COVERAGE_TOXENVS[*]}" =~ ${TOXENV} ]]; then - source ~/.venv/bin/activate - # on osx, tests run as root, need access to .coverage - sudo chmod 666 .coverage - codecov -e TRAVIS_OS_NAME TOXENV -fi diff --git a/AUTHORS b/AUTHORS index bfb56cfedb..fa15a9838b 100644 --- a/AUTHORS +++ b/AUTHORS @@ -1,8 +1,12 @@ +E-mail addresses listed here are not intended for support, please see +the `support section`_ instead. + +.. support section: https://borgbackup.readthedocs.io/en/stable/support.html + Borg authors ("The Borg Collective") ------------------------------------ - Thomas Waldmann -- Antoine Beaupré - Radek Podgorny - Yuri D'Elia - Michael Hanselmann @@ -12,6 +16,12 @@ Borg authors ("The Borg Collective") - Daniel Reichelt - Lauri Niskanen - Abdel-Rahman A. (Abogical) +- Gu1nness + +Retired +``````` + +- Antoine Beaupré Borg is a fork of Attic. @@ -52,6 +62,12 @@ Folding CRC32 Borg includes an extremely fast folding implementation of CRC32, Copyright 2013 Intel Corporation, licensed under the terms of the zlib license. +msgpack +------- + +Borg includes Python msgpack, Copyright 2008-2011 INADA Naoki +licensed under the terms of the Apache License 2.0. + xxHash ------ diff --git a/LICENSE b/LICENSE index 1928806f02..a85a6ebdf2 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (C) 2015-2017 The Borg Collective (see AUTHORS file) +Copyright (C) 2015-2021 The Borg Collective (see AUTHORS file) Copyright (C) 2010-2014 Jonas Borgström All rights reserved. 
diff --git a/MANIFEST.in b/MANIFEST.in index 2ec72dc240..a496cf74aa 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,8 +1,5 @@ -include README.rst AUTHORS LICENSE CHANGES.rst MANIFEST.in -exclude .coafile .coveragerc .gitattributes .gitignore .travis.yml Vagrantfile -prune .travis +# stuff we need to include into the sdist is handled automatically by +# setuptools_scm - it includes all git-committed files. +# but we want to exclude some committed files/dirs not needed in the sdist: +exclude .coafile .editorconfig .gitattributes .gitignore .mailmap Vagrantfile prune .github -graft src -graft docs -prune docs/_build -global-exclude *.py[co] *.orig *.so *.dll diff --git a/README.rst b/README.rst index fbdd9d53ef..50d3d79a8d 100644 --- a/README.rst +++ b/README.rst @@ -1,5 +1,6 @@ -|screencast| +|screencast_basic| +More screencasts: `installation`_, `advanced usage`_ What is BorgBackup? ------------------- @@ -15,8 +16,10 @@ fully trusted targets. See the `installation manual`_ or, if you have already downloaded Borg, ``docs/installation.rst`` to get started with Borg. +There is also an `offline documentation`_ available, in multiple formats. .. _installation manual: https://borgbackup.readthedocs.org/en/stable/installation.html +.. _offline documentation: https://readthedocs.org/projects/borgbackup/downloads Main features ~~~~~~~~~~~~~ @@ -47,7 +50,7 @@ Main features and will still be found by the deduplication algorithm. **Speed** - * performance critical code (chunking, compression, encryption) is + * performance-critical code (chunking, compression, encryption) is implemented in C/Cython * local caching of files/chunks index data * quick detection of unmodified files @@ -57,8 +60,13 @@ Main features authenticity is verified using HMAC-SHA256. Data is encrypted clientside. **Compression** - All data can be compressed by lz4 (super fast, low compression), zlib - (medium speed and compression) or lzma (low speed, high compression). 
+ All data can be optionally compressed: + + * lz4 (super fast, low compression) + * zstd (wide range from high speed and low compression to high compression + and lower speed) + * zlib (medium speed and compression) + * lzma (low speed, high compression) **Off-site backups** Borg can store data on any remote host accessible over SSH. If Borg is @@ -77,8 +85,8 @@ Main features * Mac OS X * FreeBSD * OpenBSD and NetBSD (no xattrs/ACLs support or binaries yet) - * Cygwin (not supported, no binaries yet) - * Linux Subsystem of Windows 10 (not supported) + * Cygwin (experimental, no binaries yet) + * Linux Subsystem of Windows 10 (experimental) **Free and Open Source Software** * security and functionality can be audited independently @@ -88,9 +96,12 @@ Main features Easy to use ~~~~~~~~~~~ -Initialize a new backup repository and create a backup archive:: +Initialize a new backup repository (see ``borg init --help`` for encryption options):: + + $ borg init -e repokey /path/to/repo + +Create a backup archive:: - $ borg init /path/to/repo $ borg create /path/to/repo::Saturday1 ~/Documents Now doing another backup, just to show off the great deduplication:: @@ -115,15 +126,16 @@ Now doing another backup, just to show off the great deduplication:: For a graphical frontend refer to our complementary project `BorgWeb `_. -Helping, Donations and Bounties -------------------------------- +Helping, Donations and Bounties, becoming a Patron +-------------------------------------------------- Your help is always welcome! + Spread the word, give feedback, help with documentation, testing or development. You can also give monetary support to the project, see there for details: -https://borgbackup.readthedocs.io/en/stable/support.html#bounties-and-fundraisers +https://www.borgbackup.org/support/fund.html Links ----- @@ -132,9 +144,10 @@ Links * `Releases `_, `PyPI packages `_ and `ChangeLog `_ +* `Offline Documentation `_ * `GitHub `_ and `Issue Tracker `_. 
-* `Web-Chat (IRC) `_ and +* `Web-Chat (IRC) `_ and `Mailing List `_ * `License `_ * `Security contact `_ @@ -150,7 +163,7 @@ NOT RELEASED DEVELOPMENT VERSIONS HAVE UNKNOWN COMPATIBILITY PROPERTIES. THIS IS SOFTWARE IN DEVELOPMENT, DECIDE YOURSELF WHETHER IT FITS YOUR NEEDS. Security issues should be reported to the `Security contact`_ (or -see ``docs/suppport.rst`` in the source distribution). +see ``docs/support.rst`` in the source distribution). .. start-badges @@ -164,17 +177,21 @@ see ``docs/suppport.rst`` in the source distribution). :alt: Documentation :target: https://borgbackup.readthedocs.org/en/stable/ -.. |build| image:: https://api.travis-ci.org/borgbackup/borg.svg - :alt: Build Status - :target: https://travis-ci.org/borgbackup/borg +.. |build| image:: https://github.com/borgbackup/borg/workflows/CI/badge.svg?branch=1.1-maint + :alt: Build Status (1.1-maint) + :target: https://github.com/borgbackup/borg/actions + +.. |coverage| image:: https://codecov.io/github/borgbackup/borg/coverage.svg?branch=1.1-maint + :alt: Test Coverage (1.1-maint) + :target: https://codecov.io/github/borgbackup/borg?branch=1.1-maint + +.. |screencast_basic| image:: https://asciinema.org/a/133292.png + :alt: BorgBackup Basic Usage + :target: https://asciinema.org/a/133292?autoplay=1&speed=1 -.. |coverage| image:: https://codecov.io/github/borgbackup/borg/coverage.svg?branch=master - :alt: Test Coverage - :target: https://codecov.io/github/borgbackup/borg?branch=master +.. _installation: https://asciinema.org/a/133291?autoplay=1&speed=1 -.. |screencast| image:: https://asciinema.org/a/28691.png - :alt: BorgBackup Installation and Basic Usage - :target: https://asciinema.org/a/28691?autoplay=1&speed=2 +.. _advanced usage: https://asciinema.org/a/133293?autoplay=1&speed=1 .. 
|bestpractices| image:: https://bestpractices.coreinfrastructure.org/projects/271/badge :alt: Best Practices Score diff --git a/Vagrantfile b/Vagrantfile index 87897c023b..a773b2d7d6 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -7,40 +7,24 @@ $cpus = Integer(ENV.fetch('VMCPUS', '4')) # create VMs with that many cpus $xdistn = Integer(ENV.fetch('XDISTN', '4')) # dispatch tests to that many pytest workers $wmem = $xdistn * 256 # give the VM additional memory for workers [MB] -def packages_prepare_wheezy +def packages_debianoid(user) return <<-EOF - # debian 7 wheezy does not have lz4, but it is available from wheezy-backports: - echo "deb http://http.debian.net/debian wheezy-backports main" > /etc/apt/sources.list.d/wheezy-backports.list - EOF -end - -def packages_debianoid - return <<-EOF - if id "vagrant" >/dev/null 2>&1; then - username='vagrant' - home_dir=/home/vagrant - else - username='ubuntu' - home_dir=/home/ubuntu - fi + export DEBIAN_FRONTEND=noninteractive apt-get update + # in case we get a grub update, it must not interactively ask for boot device: + echo "set grub-pc/install_devices /dev/sda" | debconf-communicate # install all the (security and other) updates apt-get dist-upgrade -y # for building borgbackup and dependencies: apt-get install -y libssl-dev libacl1-dev liblz4-dev libfuse-dev fuse pkg-config - usermod -a -G fuse $username + usermod -a -G fuse #{user} chgrp fuse /dev/fuse chmod 666 /dev/fuse - apt-get install -y fakeroot build-essential git + apt-get install -y fakeroot build-essential git curl apt-get install -y python3-dev python3-setuptools # for building python: - apt-get install -y zlib1g-dev libbz2-dev libncurses5-dev libreadline-dev liblzma-dev libsqlite3-dev - # this way it works on older dists (like ubuntu 12.04) also: - # for python 3.2 on ubuntu 12.04 we need pip<8 and virtualenv<14 as - # newer versions are not compatible with py 3.2 any more. 
- easy_install3 'pip<8.0' - pip3 install 'virtualenv<14.0' - touch $home_dir/.bash_profile ; chown $username $home_dir/.bash_profile + apt-get install -y zlib1g-dev libbz2-dev libncurses5-dev libreadline-dev liblzma-dev libsqlite3-dev libffi-dev + apt-get install -y python3-pip virtualenv python3-virtualenv EOF end @@ -57,10 +41,9 @@ def packages_redhatted # needed to compile msgpack-python (otherwise it will use slow fallback code): yum install -y gcc-c++ # for building python: - yum install -y zlib-devel bzip2-devel ncurses-devel readline-devel xz xz-devel sqlite-devel + yum install -y zlib-devel bzip2-devel ncurses-devel readline-devel xz xz-devel sqlite-devel libffi-devel #yum install -y python-pip #pip install virtualenv - touch ~vagrant/.bash_profile ; chown vagrant ~vagrant/.bash_profile EOF end @@ -69,21 +52,15 @@ def packages_darwin # install all the (security and other) updates sudo softwareupdate --ignore iTunesX sudo softwareupdate --ignore iTunes + sudo softwareupdate --ignore Safari + sudo softwareupdate --ignore "Install macOS High Sierra" sudo softwareupdate --install --all - # get osxfuse 3.x release code from github: - curl -s -L https://github.com/osxfuse/osxfuse/releases/download/osxfuse-3.6.3/osxfuse-3.6.3.dmg >osxfuse.dmg - MOUNTDIR=$(echo `hdiutil mount osxfuse.dmg | tail -1 | awk '{$1="" ; print $0}'` | xargs -0 echo) \ - && sudo installer -pkg "${MOUNTDIR}/Extras/FUSE for macOS 3.6.3.pkg" -target / - sudo chown -R vagrant /usr/local # brew must be able to create stuff here - ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" - brew update - brew install openssl - brew install lz4 - brew install xz # required for python lzma module - brew install fakeroot - brew install git - brew install pkg-config - touch ~vagrant/.bash_profile ; chown vagrant ~vagrant/.bash_profile + which brew || CI=1 /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" + brew update > 
/dev/null + brew install pkg-config readline openssl@1.1 zstd lz4 xz fakeroot git + brew install --cask osxfuse + brew upgrade # upgrade everything + echo 'export PKG_CONFIG_PATH=/usr/local/opt/openssl@1.1/lib/pkgconfig' >> ~vagrant/.bash_profile EOF end @@ -97,7 +74,7 @@ def packages_freebsd pkg install -y openssl liblz4 fusefs-libs pkgconf pkg install -y git bash # for building python: - pkg install -y sqlite3 + pkg install -y sqlite3 libffi # make bash default / work: chsh -s bash vagrant mount -t fdescfs fdesc /dev/fd @@ -110,7 +87,6 @@ def packages_freebsd pw groupmod operator -M vagrant # /dev/fuse has group operator chmod 666 /dev/fuse - touch ~vagrant/.bash_profile ; chown vagrant ~vagrant/.bash_profile # install all the (security and other) updates, packages pkg update yes | pkg upgrade @@ -119,29 +95,23 @@ end def packages_openbsd return <<-EOF - echo 'installpath = http://ftp.hostserver.de/pub/OpenBSD/6.0/packages/amd64/' > /etc/pkg.conf - echo 'export PKG_PATH=http://ftp.hostserver.de/pub/OpenBSD/6.0/packages/amd64/' >> ~/.profile - . 
~/.profile pkg_add bash chsh -s /usr/local/bin/bash vagrant - pkg_add openssl pkg_add lz4 + pkg_add zstd pkg_add git # no fakeroot - pkg_add py3-setuptools - ln -sf /usr/local/bin/python3.4 /usr/local/bin/python3 - ln -sf /usr/local/bin/python3.4 /usr/local/bin/python - easy_install-3.4 pip - pip3 install virtualenv - touch ~vagrant/.bash_profile ; chown vagrant ~vagrant/.bash_profile + pkg_add py3-pip + pkg_add py3-virtualenv + ln -sf /usr/local/bin/virtualenv-3 /usr/local/bin/virtualenv EOF end def packages_netbsd return <<-EOF hostname netbsd # the box we use has an invalid hostname - PKG_PATH="ftp://ftp.NetBSD.org/pub/pkgsrc/packages/NetBSD/amd64/6.1.5/All/" + PKG_PATH="ftp://ftp.NetBSD.org/pub/pkgsrc/packages/NetBSD/amd64/7.0.1/All" export PKG_PATH - pkg_add mozilla-rootcerts lz4 git bash + pkg_add mozilla-rootcerts lz4 git chsh -s bash vagrant mkdir -p /usr/local/opt/lz4/include mkdir -p /usr/local/opt/lz4/lib @@ -152,22 +122,26 @@ def packages_netbsd pkg_add pkg-config # avoids some "pkg-config missing" error msg, even without fuse pkg # pkg_add fuse # llfuse supports netbsd, but is still buggy. 
# https://bitbucket.org/nikratio/python-llfuse/issues/70/perfuse_open-setsockopt-no-buffer-space - pkg_add python34 py34-setuptools - ln -s /usr/pkg/bin/python3.4 /usr/pkg/bin/python - ln -s /usr/pkg/bin/python3.4 /usr/pkg/bin/python3 - easy_install-3.4 pip - pip install virtualenv - touch ~vagrant/.bash_profile ; chown vagrant ~vagrant/.bash_profile + pkg_add python37 py37-sqlite3 py37-pip py37-virtualenv + ln -s /usr/pkg/bin/python3.7 /usr/pkg/bin/python + ln -s /usr/pkg/bin/python3.7 /usr/pkg/bin/python3 + ln -s /usr/pkg/bin/pip3.7 /usr/pkg/bin/pip + ln -s /usr/pkg/bin/pip3.7 /usr/pkg/bin/pip3 + ln -s /usr/pkg/bin/virtualenv-3.7 /usr/pkg/bin/virtualenv + ln -s /usr/pkg/bin/virtualenv-3.7 /usr/pkg/bin/virtualenv3 EOF end def packages_openindiana return <<-EOF - #pkg update # XXX needs separate provisioning step + reboot - pkg install python-34 clang-3.4 lz4 git - python3 -m ensurepip - pip3 install -U setuptools pip wheel virtualenv - touch ~vagrant/.bash_profile ; chown vagrant ~vagrant/.bash_profile + # needs separate provisioning step + reboot: + #pkg update + # already installed: + #pkg install python-37 python-35 virtualenv-35 pip-35 clang-40 lz4 zstd git + pkg install gcc-7 + ln -sf /usr/bin/python3.5 /usr/bin/pyton3 + ln -sf /usr/bin/virtualenv-3.5 /usr/bin/virtualenv + ln -sf /usr/bin/pip-3.5 /usr/bin/pip EOF end @@ -227,13 +201,11 @@ end def install_pyenv(boxname) script = <<-EOF curl -s -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash - echo 'export PATH="$HOME/.pyenv/bin:/vagrant/borg:$PATH"' >> ~/.bash_profile + echo 'export PATH="$HOME/.pyenv/bin:$PATH"' >> ~/.bash_profile echo 'eval "$(pyenv init -)"' >> ~/.bash_profile echo 'eval "$(pyenv virtualenv-init -)"' >> ~/.bash_profile echo 'export PYTHON_CONFIGURE_OPTS="--enable-shared"' >> ~/.bash_profile - echo 'export LANG=en_US.UTF-8' >> ~/.bash_profile EOF - script += "echo 'export XDISTN=%d' >> ~/.bash_profile\n" % [$xdistn] return script end @@ -246,10 
+218,11 @@ end def install_pythons(boxname) return <<-EOF . ~/.bash_profile - pyenv install 3.4.0 # tests - pyenv install 3.5.0 # tests - pyenv install 3.6.0 # tests - pyenv install 3.5.3 # binary build, use latest 3.5.x release + pyenv install 3.5.3 # tests, 3.5.3 is first to support openssl 1.1 + pyenv install 3.6.2 # tests + pyenv install 3.7.9 # binary build, use latest 3.7.x release + pyenv install 3.8.0 # tests + pyenv install 3.9.0 # tests pyenv rehash EOF end @@ -266,9 +239,9 @@ def build_pyenv_venv(boxname) return <<-EOF . ~/.bash_profile cd /vagrant/borg - # use the latest 3.5 release - pyenv global 3.5.3 - pyenv virtualenv 3.5.3 borg-env + # use the latest 3.7 release + pyenv global 3.7.9 + pyenv virtualenv 3.7.9 borg-env ln -s ~/.pyenv/versions/borg-env . EOF end @@ -280,10 +253,7 @@ def install_borg(fuse) . borg-env/bin/activate pip install -U wheel # upgrade wheel, too old for 3.5 cd borg - # clean up (wrong/outdated) stuff we likely got via rsync: - rm -rf __pycache__ - find src -name '__pycache__' -exec rm -rf {} \\; - pip install -r requirements.d/development.txt + pip install -r requirements.d/development.lock.txt python setup.py clean EOF if fuse @@ -308,7 +278,7 @@ def install_pyinstaller() . borg-env/bin/activate git clone https://github.com/thomaswaldmann/pyinstaller.git cd pyinstaller - git checkout v3.2.1 + git checkout v4.2-maint python setup.py install EOF end @@ -320,6 +290,8 @@ def build_binary_with_pyinstaller(boxname) . borg-env/bin/activate cd borg pyinstaller --clean --distpath=/vagrant/borg scripts/borg.exe.spec + echo 'export PATH="/vagrant/borg:$PATH"' >> ~/.bash_profile + cd .. && tar -czvf borg.tgz borg-dir EOF end @@ -330,28 +302,32 @@ def run_tests(boxname) . 
../borg-env/bin/activate if which pyenv 2> /dev/null; then # for testing, use the earliest point releases of the supported python versions: - pyenv global 3.4.0 3.5.0 3.6.0 - pyenv local 3.4.0 3.5.0 3.6.0 + pyenv global 3.5.3 3.6.2 3.7.9 3.8.0 3.9.0 + pyenv local 3.5.3 3.6.2 3.7.9 3.8.0 3.9.0 fi # otherwise: just use the system python if which fakeroot 2> /dev/null; then echo "Running tox WITH fakeroot -u" - fakeroot -u tox --skip-missing-interpreters + fakeroot -u tox --skip-missing-interpreters -e py35,py36,py37,py38,py39 else echo "Running tox WITHOUT fakeroot -u" - tox --skip-missing-interpreters + tox --skip-missing-interpreters -e py35,py36,py37,py38,py39 fi EOF end -def fix_perms +def fs_init(user) return <<-EOF - # . ~/.profile - if id "vagrant" >/dev/null 2>&1; then - chown -R vagrant /vagrant/borg - else - chown -R ubuntu /vagrant/borg - fi + # clean up (wrong/outdated) stuff we likely got via rsync: + rm -rf /vagrant/borg/borg/.tox 2> /dev/null + rm -rf /vagrant/borg/borg/borgbackup.egg-info 2> /dev/null + rm -rf /vagrant/borg/borg/__pycache__ 2> /dev/null + find /vagrant/borg/borg/src -name '__pycache__' -exec rm -rf {} \\; 2> /dev/null + chown -R #{user} /vagrant/borg + touch ~#{user}/.bash_profile ; chown #{user} ~#{user}/.bash_profile + echo 'export LANG=en_US.UTF-8' >> ~#{user}/.bash_profile + echo 'export LC_CTYPE=en_US.UTF-8' >> ~#{user}/.bash_profile + echo 'export XDISTN=#{$xdistn}' >> ~#{user}/.bash_profile EOF end @@ -361,9 +337,6 @@ Vagrant.configure(2) do |config| # do not let the VM access . on the host machine via the default shared folder! 
config.vm.synced_folder ".", "/vagrant", disabled: true - # fix permissions on synced folder - config.vm.provision "fix perms", :type => :shell, :inline => fix_perms - config.vm.provider :virtualbox do |v| #v.gui = true v.cpus = $cpus @@ -375,6 +348,7 @@ Vagrant.configure(2) do |config| b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") b.vm.provision "install system packages", :type => :shell, :inline => packages_redhatted b.vm.provision "install pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("centos7_64") b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("centos7_64") @@ -383,52 +357,40 @@ Vagrant.configure(2) do |config| b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("centos7_64") end - config.vm.define "centos6_32" do |b| - b.vm.box = "centos6-32" - b.vm.provider :virtualbox do |v| - v.memory = 768 + $wmem - end - b.vm.provision "install system packages", :type => :shell, :inline => packages_redhatted - b.vm.provision "install pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("centos6_32") - b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("centos6_32") - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_pyenv_venv("centos6_32") - b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(false) - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("centos6_32") - end - - config.vm.define "centos6_64" do |b| - b.vm.box = "centos6-64" + config.vm.define "focal64" do |b| + b.vm.box = "ubuntu/focal64" b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end - b.vm.provision "install system packages", :type => :shell, :inline => packages_redhatted - b.vm.provision "install pyenv", :type => 
:shell, :privileged => false, :inline => install_pyenv("centos6_64") - b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("centos6_64") - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_pyenv_venv("centos6_64") - b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(false) - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("centos6_64") + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") + b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid("vagrant") + b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("focal64") + b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) + b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("focal64") end - config.vm.define "xenial64" do |b| - b.vm.box = "ubuntu/xenial64" + config.vm.define "bionic64" do |b| + b.vm.box = "ubuntu/bionic64" b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end - b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("xenial64") + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") + b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid("vagrant") + b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("bionic64") b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("xenial64") + b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("bionic64") end - config.vm.define "trusty64" do 
|b| - b.vm.box = "ubuntu/trusty64" + config.vm.define "buster64" do |b| + b.vm.box = "debian/buster64" b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end - b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("trusty64") + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") + b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid("vagrant") + b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("buster64") b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("trusty64") + b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("buster64") end config.vm.define "stretch64" do |b| @@ -436,62 +398,25 @@ Vagrant.configure(2) do |config| b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end - b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("stretch64") - b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("stretch64") - end - - config.vm.define "jessie64" do |b| - b.vm.box = "debian/jessie64" - b.vm.provider :virtualbox do |v| - v.memory = 1024 + $wmem - end - b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("jessie64") - b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) - b.vm.provision "run tests", :type => :shell, :privileged => 
false, :inline => run_tests("jessie64") - end - - config.vm.define "wheezy32" do |b| - b.vm.box = "boxcutter/debian7-i386" - b.vm.provider :virtualbox do |v| - v.memory = 768 + $wmem - end - b.vm.provision "packages prepare wheezy", :type => :shell, :inline => packages_prepare_wheezy - b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid - b.vm.provision "install pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("wheezy32") - b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("wheezy32") - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_pyenv_venv("wheezy32") + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") + b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid("vagrant") + b.vm.provision "install pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("stretch64") + b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("stretch64") + b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_pyenv_venv("stretch64") b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) b.vm.provision "install pyinstaller", :type => :shell, :privileged => false, :inline => install_pyinstaller() - b.vm.provision "build binary with pyinstaller", :type => :shell, :privileged => false, :inline => build_binary_with_pyinstaller("wheezy32") - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("wheezy32") - end - - config.vm.define "wheezy64" do |b| - b.vm.box = "boxcutter/debian7" - b.vm.provider :virtualbox do |v| - v.memory = 1024 + $wmem - end - b.vm.provision "packages prepare wheezy", :type => :shell, :inline => packages_prepare_wheezy - b.vm.provision "packages debianoid", :type => :shell, :inline => packages_debianoid - 
b.vm.provision "install pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("wheezy64") - b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("wheezy64") - b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_pyenv_venv("wheezy64") - b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(true) - b.vm.provision "install pyinstaller", :type => :shell, :privileged => false, :inline => install_pyinstaller() - b.vm.provision "build binary with pyinstaller", :type => :shell, :privileged => false, :inline => build_binary_with_pyinstaller("wheezy64") - b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("wheezy64") + b.vm.provision "build binary with pyinstaller", :type => :shell, :privileged => false, :inline => build_binary_with_pyinstaller("stretch64") + b.vm.provision "run tests", :type => :shell, :privileged => false, :inline => run_tests("stretch64") end # OS X config.vm.define "darwin64" do |b| - b.vm.box = "jhcook/yosemite-clitools" + b.vm.box = "macos-sierra" b.vm.provider :virtualbox do |v| - v.memory = 1536 + $wmem - v.customize ['modifyvm', :id, '--ostype', 'MacOS1010_64'] + v.memory = 2048 + $wmem + v.customize ['modifyvm', :id, '--ostype', 'MacOS_64'] v.customize ['modifyvm', :id, '--paravirtprovider', 'default'] + v.customize ['modifyvm', :id, '--nested-hw-virt', 'on'] # Adjust CPU settings according to # https://github.com/geerlingguy/macos-virtualbox-vm v.customize ['modifyvm', :id, '--cpuidset', @@ -499,6 +424,7 @@ Vagrant.configure(2) do |config| # Disable USB variant requiring Virtualbox proprietary extension pack v.customize ["modifyvm", :id, '--usbehci', 'off', '--usbxhci', 'off'] end + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") b.vm.provision "packages darwin", :type => :shell, :privileged => false, :inline => packages_darwin b.vm.provision "install 
pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("darwin64") b.vm.provision "fix pyenv", :type => :shell, :privileged => false, :inline => fix_pyenv_darwin("darwin64") @@ -511,13 +437,13 @@ Vagrant.configure(2) do |config| end # BSD - # note: the FreeBSD-10.3-RELEASE box needs "vagrant up" twice to start. config.vm.define "freebsd64" do |b| - b.vm.box = "freebsd/FreeBSD-10.3-RELEASE" + b.vm.box = "freebsd121-64" b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end b.ssh.shell = "sh" + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") b.vm.provision "install system packages", :type => :shell, :inline => packages_freebsd b.vm.provision "install pyenv", :type => :shell, :privileged => false, :inline => install_pyenv("freebsd") b.vm.provision "install pythons", :type => :shell, :privileged => false, :inline => install_pythons("freebsd") @@ -529,11 +455,12 @@ Vagrant.configure(2) do |config| end config.vm.define "openbsd64" do |b| - b.vm.box = "openbsd60-64" # note: basic openbsd install for vagrant WITH sudo and rsync pre-installed + b.vm.box = "openbsd64-64" # note: basic openbsd install for vagrant WITH sudo and rsync pre-installed b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end b.ssh.shell = "sh" + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") b.vm.provision "packages openbsd", :type => :shell, :inline => packages_openbsd b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("openbsd64") b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(false) @@ -545,6 +472,7 @@ Vagrant.configure(2) do |config| b.vm.provider :virtualbox do |v| v.memory = 1024 + $wmem end + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") b.vm.provision "packages netbsd", :type => :shell, :inline => packages_netbsd b.vm.provision "build env", :type => :shell, :privileged => false, :inline => 
build_sys_venv("netbsd64") b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(false) @@ -554,10 +482,11 @@ Vagrant.configure(2) do |config| # rsync on openindiana has troubles, does not set correct owner for /vagrant/borg and thus gives lots of # permission errors. can be manually fixed in the VM by: sudo chown -R vagrant /vagrant/borg ; then rsync again. config.vm.define "openindiana64" do |b| - b.vm.box = "openindiana/hipster" + b.vm.box = "openindiana" b.vm.provider :virtualbox do |v| v.memory = 1536 + $wmem end + b.vm.provision "fs init", :type => :shell, :inline => fs_init("vagrant") b.vm.provision "packages openindiana", :type => :shell, :inline => packages_openindiana b.vm.provision "build env", :type => :shell, :privileged => false, :inline => build_sys_venv("openindiana64") b.vm.provision "install borg", :type => :shell, :privileged => false, :inline => install_borg(false) @@ -580,9 +509,6 @@ Vagrant.configure(2) do |config| #v.gui = true end - # fix permissions placeholder - b.vm.provision "fix perms", :type => :shell, :privileged => false, :inline => "echo 'fix permission placeholder'" - b.vm.provision "packages cygwin", :type => :shell, :privileged => false, :inline => packages_cygwin("x86_64") b.vm.provision :reload b.vm.provision "cygwin install pip", :type => :shell, :privileged => false, :inline => install_cygwin_venv diff --git a/conftest.py b/conftest.py index cc428be1f2..a1e3b6c776 100644 --- a/conftest.py +++ b/conftest.py @@ -15,23 +15,22 @@ pytest.register_assert_rewrite('borg.testsuite') -import borg.cache -from borg.logger import setup_logging +import borg.cache # noqa: E402 +from borg.logger import setup_logging # noqa: E402 # Ensure that the loggers exist for all tests setup_logging() -from borg.testsuite import has_lchflags, has_llfuse -from borg.testsuite import are_symlinks_supported, are_hardlinks_supported, is_utime_fully_supported -from borg.testsuite.platform import fakeroot_detected, 
are_acls_working -from borg import xattr +from borg.testsuite import has_lchflags, has_llfuse # noqa: E402 +from borg.testsuite import are_symlinks_supported, are_hardlinks_supported, is_utime_fully_supported # noqa: E402 +from borg.testsuite.platform import fakeroot_detected # noqa: E402 @pytest.fixture(autouse=True) def clean_env(tmpdir_factory, monkeypatch): # avoid that we access / modify the user's normal .config / .cache directory: - monkeypatch.setenv('XDG_CONFIG_HOME', tmpdir_factory.mktemp('xdg-config-home')) - monkeypatch.setenv('XDG_CACHE_HOME', tmpdir_factory.mktemp('xdg-cache-home')) + monkeypatch.setenv('XDG_CONFIG_HOME', str(tmpdir_factory.mktemp('xdg-config-home'))) + monkeypatch.setenv('XDG_CACHE_HOME', str(tmpdir_factory.mktemp('xdg-cache-home'))) # also avoid to use anything from the outside environment: keys = [key for key in os.environ if key.startswith('BORG_')] for key in keys: @@ -65,7 +64,8 @@ def __init__(self, request): self.org_cache_wipe_cache = borg.cache.LocalCache.wipe_cache def wipe_should_not_be_called(*a, **kw): - raise AssertionError("Cache wipe was triggered, if this is part of the test add @pytest.mark.allow_cache_wipe") + raise AssertionError("Cache wipe was triggered, if this is part of the test add " + "@pytest.mark.allow_cache_wipe") if 'allow_cache_wipe' not in request.keywords: borg.cache.LocalCache.wipe_cache = wipe_should_not_be_called request.addfinalizer(self.undo) diff --git a/docs/3rd_party/README b/docs/3rd_party/README new file mode 100644 index 0000000000..1f7b9b6ad9 --- /dev/null +++ b/docs/3rd_party/README @@ -0,0 +1,5 @@ +Here we store 3rd party documentation, licenses, etc. + +Please note that all files inside the "borg" package directory (except the +stuff excluded in setup.py) will be INSTALLED, so don't keep docs or licenses +there. 
diff --git a/src/borg/algorithms/blake2/COPYING b/docs/3rd_party/blake2/COPYING similarity index 100% rename from src/borg/algorithms/blake2/COPYING rename to docs/3rd_party/blake2/COPYING diff --git a/src/borg/algorithms/blake2/README.md b/docs/3rd_party/blake2/README.md similarity index 82% rename from src/borg/algorithms/blake2/README.md rename to docs/3rd_party/blake2/README.md index 696febaa2f..1baf979795 100644 --- a/src/borg/algorithms/blake2/README.md +++ b/docs/3rd_party/blake2/README.md @@ -4,7 +4,7 @@ This is the reference source code package of BLAKE2. All code is triple-licensed under the [CC0](http://creativecommons.org/publicdomain/zero/1.0), the [OpenSSL Licence](https://www.openssl.org/source/license.html), -or the [Apache Public License 2.0](http://www.apache.org/licenses/LICENSE-2.0), +or the [Apache Public License 2.0](https://www.apache.org/licenses/LICENSE-2.0), at your choosing. More: [https://blake2.net](https://blake2.net). [GitHub repository](https://github.com/BLAKE2/BLAKE2). diff --git a/docs/3rd_party/lz4/LICENSE b/docs/3rd_party/lz4/LICENSE new file mode 100644 index 0000000000..74c2cdd7d5 --- /dev/null +++ b/docs/3rd_party/lz4/LICENSE @@ -0,0 +1,24 @@ +LZ4 Library +Copyright (c) 2011-2016, Yann Collet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/docs/3rd_party/msgpack/COPYING b/docs/3rd_party/msgpack/COPYING new file mode 100644 index 0000000000..f067af3aae --- /dev/null +++ b/docs/3rd_party/msgpack/COPYING @@ -0,0 +1,14 @@ +Copyright (C) 2008-2011 INADA Naoki + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/docs/3rd_party/zstd/LICENSE b/docs/3rd_party/zstd/LICENSE new file mode 100644 index 0000000000..a793a80289 --- /dev/null +++ b/docs/3rd_party/zstd/LICENSE @@ -0,0 +1,30 @@ +BSD License + +For Zstandard software + +Copyright (c) 2016-present, Facebook, Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name Facebook nor the names of its contributors may be used to + endorse or promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/docs/book.rst b/docs/book.rst index a1a1b9fee8..412cce2802 100644 --- a/docs/book.rst +++ b/docs/book.rst @@ -1,3 +1,5 @@ +:orphan: + .. include:: global.rst.inc Borg documentation @@ -15,5 +17,9 @@ Borg documentation usage deployment faq + support + resources + changes internals development + authors diff --git a/docs/changes.rst b/docs/changes.rst index 25909cf30e..3648200dba 100644 --- a/docs/changes.rst +++ b/docs/changes.rst @@ -1,4 +1,3 @@ - .. 
_important_notes: Important notes @@ -6,6 +5,93 @@ Important notes This section provides information about security and corruption issues. +.. _hashindex_set_bug: + +Pre-1.1.11 potential index corruption / data loss issue +------------------------------------------------------- + +A bug was discovered in our hashtable code, see issue #4829. +The code is used for the client-side chunks cache and the server-side repo index. + +Although borg uses the hashtables very heavily, the index corruption did not +happen too frequently, because it needed specific conditions to happen. + +Data loss required even more specific conditions, so it should be rare (and +also detectable via borg check). + +You might be affected if borg crashed with / complained about: + +- AssertionError: Corrupted segment reference count - corrupted index or hints +- ObjectNotFound: Object with key ... not found in repository ... +- Index mismatch for key b'...'. (..., ...) != (-1, -1) +- ValueError: stats_against: key contained in self but not in master_index. + +Advised procedure to fix any related issue in your indexes/caches: + +- install fixed borg code (on client AND server) +- for all of your clients and repos remove the cache by: + + borg delete --cache-only YOURREPO + + (later, the cache will be re-built automatically) +- for all your repos, rebuild the repo index by: + + borg check --repair YOURREPO + + This will also check all archives and detect if there is any data-loss issue. + +Affected branches / releases: + +- fd06497 introduced the bug into 1.1-maint branch - it affects all borg 1.1.x since 1.1.0b4. +- fd06497 introduced the bug into master branch - it affects all borg 1.2.0 alpha releases. +- c5cd882 introduced the bug into 1.0-maint branch - it affects all borg 1.0.x since 1.0.11rc1. + +The bug was fixed by: + +- 701159a fixes the bug in 1.1-maint branch - will be released with borg 1.1.11. +- fa63150 fixes the bug in master branch - will be released with borg 1.2.0a8. 
+- 7bb90b6 fixes the bug in 1.0-maint branch. Branch is EOL, no new release is planned as of now. + +.. _broken_validator: + +Pre-1.1.4 potential data corruption issue +----------------------------------------- + +A data corruption bug was discovered in borg check --repair, see issue #3444. + +This is a 1.1.x regression, releases < 1.1 (e.g. 1.0.x) are not affected. + +To avoid data loss, you must not run borg check --repair using an unfixed version +of borg 1.1.x. The first official release that has the fix is 1.1.4. + +Package maintainers may have applied the fix to updated packages of 1.1.x (x<4) +though, see the package maintainer's package changelog to make sure. + +If you never had missing item metadata chunks, the bug has not affected you +even if you did run borg check --repair with an unfixed version. + +When borg check --repair tried to repair corrupt archives that miss item metadata +chunks, the resync to valid metadata in still present item metadata chunks +malfunctioned. This was due to a broken validator that considered all (even valid) +item metadata as invalid. As they were considered invalid, borg discarded them. +Practically, that means the affected files, directories or other fs objects were +discarded from the archive. + +Due to the malfunction, the process was extremely slow, but if you let it +complete, borg would have created a "repaired" archive that has lost a lot of items. +If you interrupted borg check --repair because it was so strangely slow (killing +borg somehow, e.g. Ctrl-C) the transaction was rolled back and no corruption occurred. + +The log message indicating the precondition for the bug triggering looks like: + + item metadata chunk missing [chunk: 001056_bdee87d...a3e50d] + +If you never had that in your borg check --repair runs, you're not affected. + +But if you're unsure or you actually have seen that, better check your archives. 
+By just using "borg list repo::archive" you can see if all expected filesystem +items are listed. + .. _tam_vuln: Pre-1.0.9 manifest spoofing vulnerability (CVE-2016-10099) @@ -38,7 +124,7 @@ Steps you should take: Prior versions can access and modify repositories with this measure enabled, however, to 1.0.9 or later their modifications are indiscernible from an attack and will raise an error until the below procedure is followed. We are aware that this can -be be annoying in some circumstances, but don't see a way to fix the vulnerability +be annoying in some circumstances, but don't see a way to fix the vulnerability otherwise. In case a version prior to 1.0.9 is used to modify a repository where above procedure @@ -131,6 +217,1085 @@ The best check that everything is ok is to run a dry-run extraction:: Changelog ========= +Version 1.1.16 (2021-03-23) +--------------------------- + +Compatibility notes: + +- When upgrading from borg 1.0.x to 1.1.x, please note: + + - read all the compatibility notes for 1.1.0*, starting from 1.1.0b1. + - borg upgrade: you do not need to and you also should not run it. + - borg might ask some security-related questions once after upgrading. + You can answer them either manually or via environment variable. + One known case is if you use unencrypted repositories, then it will ask + about a unknown unencrypted repository one time. + - your first backup with 1.1.x might be significantly slower (it might + completely read, chunk, hash a lot files) - this is due to the + --files-cache mode change (and happens every time you change mode). + You can avoid the one-time slowdown by using the pre-1.1.0rc4-compatible + mode (but that is less safe for detecting changed files than the default). + See the --files-cache docs for details. +- 1.1.11 removes WSL autodetection (Windows 10 Subsystem for Linux). 
+ If WSL still has a problem with sync_file_range, you need to set + BORG_WORKAROUNDS=basesyncfile in the borg process environment to + work around the WSL issue. +- 1.1.14 changes return codes due to a bug fix: + In case you have scripts expecting rc == 2 for a signal exit, you need to + update them to check for >= 128 (as documented since long). +- 1.1.15 drops python 3.4 support, minimum requirement is 3.5 now. + +Fixes: + +- setup.py: add special openssl prefix for Apple M1 compatibility +- do not recurse into duplicate roots, #5603 +- remove empty shadowed_segments lists, #5275, #5614 +- fix libpython load error when borg fat binary / dir-based binary is invoked + via a symlink by upgrading pyinstaller to v4.2, #5688 +- config: accept non-int value (like 500M or 100G) for max_segment_size or + storage_quota, #5639. + please note: when setting a non-int value for this in a repo config, + using the repo will require borg >= 1.1.16. + +New features: + +- bundled msgpack: drop support for old buffer protocol to support Python 3.10 +- verbose files cache logging via --debug-topic=files_cache, #5659. + Use this if you suspect that borg does not detect unmodified files as expected. +- create/extract: add --noxattrs and --noacls option, #3955. + when given with borg create, borg will not get xattrs / ACLs from input files + (and thus, it will not archive xattrs / ACLs). when given with borg extract, + borg will not read xattrs / ACLs from archive and will not set xattrs / ACLs + on extracted files. 
+- diff: add --json-lines option, #3765 +- check: debug log segment filename +- borg debug dump-hints + +Other changes: + +- Tab completion support for additional archives for 'borg delete' +- repository: deduplicate code of put and delete, no functional change +- tests: fix result order issue (sporadic test failure on openindiana) +- vagrant: + + - upgrade pyinstaller to v4.2, #5671 + - avoid grub-install asking interactively for device + - remove the xenial box + - update freebsd box to 12.1 +- docs: + + - update macOS install instructions, #5677 + - use macFUSE (not osxfuse) for Apple M1 compatibility + - update docs for dev environment installation instructions, #5643 + - fix grammar in faq + - recomend running tests only on installed versions for setup + - add link back to git-installation + - remove /var/cache exclusion in example commands, #5625. + This is generally a poor idea and shouldn't be promoted through examples. + - add repology.org badge with current packaging status + - explain hash collision + - add unsafe workaround to use an old repo copy, #5722 + +Version 1.1.15 (2020-12-25) +--------------------------- + +Fixes: + +- extract: + + - improve exception handling when setting xattrs, #5092. + - emit a warning message giving the path, xattr key and error message. + - continue trying to restore other xattrs and bsdflags of the same file + after an exception with xattr-setting happened. +- export-tar: + + - set tar format to GNU_FORMAT explicitly, #5274 + - fix memory leak with ssh: remote repository, #5568 + - fix potential memory leak with ssh: remote repository with partial extraction +- create: fix --dry-run and --stats coexistence, #5415 +- use --timestamp for {utcnow} and {now} if given, #5189 + +New features: + +- create: implement --stdin-mode, --stdin-user and --stdin-group, #5333 +- allow appending the files cache filename with BORG_FILES_CACHE_SUFFIX env var + +Other changes: + +- drop python 3.4 support, minimum requirement is 3.5 now. 
+- enable using libxxhash instead of bundled xxh64 code +- update llfuse requirements (1.3.8) +- set cython language_level in some files to fix warnings +- allow EIO with warning when trying to hardlink +- PropDict: fail early if internal_dict is not a dict +- update shell completions +- tests / CI + + - add a test for the hashindex corruption bug, #5531 #4829 + - fix spurious failure in test_cache_files, #5438 + - added a github ci workflow + - reduce testing on travis, no macOS, no py3x-dev, #5467 + - travis: use newer dists, native py on dist +- vagrant: + + - remove jessie and trusty boxes, #5348 #5383 + - pyinstaller 4.0, build on py379 + - binary build on stretch64, #5348 + - remove easy_install based pip installation +- docs: + + - clarify '--one-file-system' for btrfs, #5391 + - add example for excluding content using the --pattern cmd line arg + - complement the documentation for pattern files and exclude files, #5524 + - made ansible playbook more generic, use package instead of pacman. also + change state from "latest" to "present". 
+ - complete documentation on append-only remote repos, #5497 + - internals: rather talk about target size than statistics, #5336 + - new compression algorithm policy, #1633 #5505 + - faq: add a hint on sleeping computer, #5301 + - note requirements for full disk access on macOS Catalina, #5303 + - fix/improve description of borg upgrade hardlink usage, #5518 +- modernize 1.1 code: + + - drop code/workarounds only needed to support Python 3.4 + - remove workaround for pre-release py37 argparse bug + - removed some outdated comments/docstrings + - requirements: remove some restrictions, lock on current versions + + +Version 1.1.14 (2020-10-07) +--------------------------- + +Fixes: + +- check --repair: fix potential data loss when interrupting it, #5325 +- exit with 128 + signal number (as documented) when borg is killed by a signal, #5161 +- fix hardlinked CACHEDIR.TAG processing, #4911 +- create --read-special: .part files also should be regular files, #5217 +- llfuse dependency: choose least broken 1.3.6/1.3.7. + 1.3.6 is broken on python 3.9, 1.3.7 is broken on FreeBSD. + +Other changes: + +- upgrade bundled xxhash to 0.7.4 +- self test: if it fails, also point to OS and hardware, #5334 +- pyinstaller: compute basepath from spec file location +- prettier error message when archive gets too big, #5307 +- check/recreate are not "experimental" any more (but still potentially dangerous): + + - recreate: remove extra confirmation + - rephrase some warnings, update docs, #5164 +- shell completions: + + - misc. updates / fixes + - support repositories in fish tab completion, #5256 + - complete $BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING + - rewrite zsh completion: + + - completion for almost all optional and positional arguments + - completion for Borg environment variables (parameters) +- use "allow/deny list" instead of "white/black list" wording +- declare "allow_cache_wipe" marker in setup.cfg to avoid pytest warning +- vagrant / tests: + + - misc. 
fixes / updates + - use python 3.5.10 for binary build + - build directory-based binaries additionally to the single file binaries + - add libffi-dev, required to build python + - use cryptography<3.0, more recent versions break the jessie box + - test on python 3.9 + - do brew update with /dev/null redirect to avoid "too much log output" on travis-ci +- docs: + + - add ssh-agent pull backup method docs, #5288 + - how to approach borg speed issues, #5371 + - mention double --force in prune docs + - update Homebrew install instructions, #5185 + - better description of how cache and rebuilds of it work + - point to borg create --list item flags in recreate usage, #5165 + - add security faq explaining AES-CTR crypto issues, #5254 + - add a note to create from stdin regarding files cache, #5180 + - fix borg.1 manpage generation regression, #5211 + - clarify how exclude options work in recreate, #5193 + - add section for retired contributors + - hint about not misusing private email addresses of contributors for borg support + + +Version 1.1.13 (2020-06-06) +--------------------------- + +Fixes: + +- rebuilt using a current Cython version, compatible with python 3.8, #5214 + + +Version 1.1.12 (2020-06-06) +--------------------------- + +Fixes: + +- fix preload-related memory leak, #5202. +- mount / borgfs (FUSE filesystem): + + - fix FUSE low linear read speed on large files, #5067 + - fix crash on old llfuse without birthtime attrs, #5064 - accidentally + we required llfuse >= 1.3. Now also old llfuse works again. + - set f_namemax in statfs result, #2684 +- update precedence of env vars to set config and cache paths, #4894 +- correctly calculate compression ratio, taking header size into account, too + +New features: + +- --bypass-lock option to bypass locking with read-only repositories + +Other changes: + +- upgrade bundled zstd to 1.4.5 +- travis: adding comments and explanations to Travis config / install script, + improve macOS builds. 
+- tests: test_delete_force: avoid sporadic test setup issues, #5196 +- misc. vagrant fixes +- the binary for macOS is now built on macOS 10.12 +- the binaries for Linux are now built on Debian 8 "Jessie", #3761 +- docs: + + - PlaceholderError not printed as JSON, #4073 + - "How important is Borg config?", #4941 + - make Sphinx warnings break docs build, #4587 + - some markup / warning fixes + - add "updating borgbackup.org/releases" to release checklist, #4999 + - add "rendering docs" to release checklist, #5000 + - clarify borg init's encryption modes + - add note about patterns and stored paths, #4160 + - add upgrade of tools to pip installation how-to + - document one cause of orphaned chunks in check command, #2295 + - linked recommended restrictions to ssh public keys on borg servers in faq, #4946 + + +Version 1.1.11 (2020-03-08) +--------------------------- + +Fixes: + +- fixed potential index corruption / data loss issue due to bug in hashindex_set, #4829. + Please read and follow the more detailed notes close to the top of this document. +- upgrade bundled xxhash to 0.7.3, #4891. + 0.7.2 is the minimum requirement for correct operations on ARMv6 in non-fixup + mode, where unaligned memory accesses cause bus errors. + 0.7.3 adds some speedups and libxxhash 0.7.3 even has a pkg-config file now. 
+- upgrade bundled lz4 to 1.9.2 +- upgrade bundled zstd to 1.4.4 +- fix crash when upgrading erroneous hints file, #4922 +- extract: + + - fix KeyError for "partial" extraction, #4607 + - fix "partial" extract for hardlinked contentless file types, #4725 + - fix preloading for old (0.xx) remote servers, #4652 + - fix confusing output of borg extract --list --strip-components, #4934 +- delete: after double-force delete, warn about necessary repair, #4704 +- create: give invalid repo error msg if repo config not found, #4411 +- mount: fix FUSE mount missing st_birthtime, #4763 #4767 +- check: do not stumble over invalid item key, #4845 +- info: if the archive doesn't exist, print a pretty message, #4793 +- SecurityManager.known(): check all files, #4614 +- Repository.open: use stat() to check for repo dir, #4695 +- Repository.check_can_create_repository: use stat() to check, #4695 +- fix invalid archive error message +- fix optional/non-optional location arg, #4541 +- commit-time free space calc: ignore bad compact map entries, #4796 +- ignore EACCES (errno 13) when hardlinking the old config, #4730 +- --prefix / -P: fix processing, avoid argparse issue, #4769 + +New features: + +- enable placeholder usage in all extra archive arguments +- new BORG_WORKAROUNDS mechanism, basesyncfile, #4710 +- recreate: support --timestamp option, #4745 +- support platforms without os.link (e.g. Android with Termux), #4901. + if we don't have os.link, we just extract another copy instead of making a hardlink. +- support linux platforms without sync_file_range (e.g. Android 7 with Termux), #4905 + +Other: + +- ignore --stats when given with --dry-run, but continue, #4373 +- add some ProgressIndicator msgids to code / fix docs, #4935 +- elaborate on "Calculating size" message +- argparser: always use REPOSITORY in metavar, also use more consistent help phrasing. 
+- check: improve error output for matching index size, see #4829 +- docs: + + - changelog: add advisory about hashindex_set bug #4829 + - better describe BORG_SECURITY_DIR, BORG_CACHE_DIR, #4919 + - infos about cache security assumptions, #4900 + - add FAQ describing difference between a local repo vs. repo on a server. + - document how to test exclusion patterns without performing an actual backup + - timestamps in the files cache are now usually ctime, #4583 + - fix bad reference to borg compact (does not exist in 1.1), #4660 + - create: borg 1.1 is not future any more + - extract: document limitation "needs empty destination", #4598 + - how to supply a passphrase, use crypto devices, #4549 + - fix osxfuse github link in installation docs + - add example of exclude-norecurse rule in help patterns + - update macOS Brew link + - add note about software for automating backups, #4581 + - AUTHORS: mention copyright+license for bundled msgpack + - fix various code blocks in the docs, #4708 + - updated docs to cover use of temp directory on remote, #4545 + - add restore docs, #4670 + - add a pull backup / push restore how-to, #1552 + - add FAQ how to retain original paths, #4532 + - explain difference between --exclude and --pattern, #4118 + - add FAQs for SSH connection issues, #3866 + - improve password FAQ, #4591 + - reiterate that 'file cache names are absolute' in FAQ +- tests: + + - cope with ANY error when importing pytest into borg.testsuite, #4652 + - fix broken test that relied on improper zlib assumptions + - test_fuse: filter out selinux xattrs, #4574 +- travis / vagrant: + + - misc python versions removed / changed (due to openssl 1.1 compatibility) + or added (3.7 and 3.8, for better borg compatibility testing) + - binary building is on python 3.5.9 now +- vagrant: + + - add new boxes: ubuntu 18.04 and 20.04, debian 10 + - update boxes: openindiana, darwin, netbsd + - remove old boxes: centos 6 + - darwin: updated osxfuse to 3.10.4 + - use debian/ubuntu 
pip/virtualenv packages + - rather use python 3.6.2 than 3.6.0, fixes coverage/sqlite3 issue + - use requirements.d/development.lock.txt to avoid compat issues +- travis: + + - darwin: backport some install code / order from master + - remove deprecated keyword "sudo" from travis config + - allow osx builds to fail, #4955 + this is due to travis-ci frequently being so slow that the OS X builds + just fail because they exceed 50 minutes and get killed by travis. + + +Version 1.1.10 (2019-05-16) +--------------------------- + +Fixes: + +- extract: hang on partial extraction with ssh: repo, when hardlink master + is not matched/extracted and borg hangs on related slave hardlink, #4350 +- lrucache: regularly remove old FDs, #4427 +- avoid stale filehandle issues, #3265 +- freebsd: make xattr platform code api compatible with linux, #3952 +- use whitelist approach for borg serve, #4097 +- borg command shall terminate with rc 2 for ImportErrors, #4424 +- create: only run stat_simple_attrs() once, this increases + backup with lots of unchanged files performance by ~ 5%. +- prune: fix incorrect borg prune --stats output with --dry-run, #4373 +- key export: emit user-friendly error if repo key is exported to a directory, + #4348 + +New features: + +- bundle latest supported msgpack-python release (0.5.6), remove msgpack-python + from setup.py install_requires - by default we use the bundled code now. + optionally, we still support using an external msgpack (see hints in + setup.py), but this requires solid requirements management within + distributions and is not recommended. + borgbackup will break if you upgrade msgpack to an unsupported version. +- display msgpack version as part of sysinfo (e.g. 
in tracebacks) +- timestamp for borg delete --info added, #4359 +- enable placeholder usage in --comment and --glob-archives, #4559, #4495 + +Other: + +- serve: do not check python/libc for borg serve, #4483 +- shell completions: borg diff second archive +- release scripts: signing binaries with Qubes OS support +- testing: + + - vagrant: upgrade openbsd box to 6.4 + - travis-ci: lock test env to py 3.4 compatible versions, #4343 + - get rid of confusing coverage warning, #2069 + - rename test_mount_hardlinks to test_fuse_mount_hardlinks, + so both can be excluded by "not test_fuse". + - pure-py msgpack warning shall not make a lot of tests fail, #4558 +- docs: + + - add "SSH Configuration" section to "borg serve", #3988, #636, #4485 + - README: new URL for funding options + - add a sample logging.conf in docs/misc, #4380 + - elaborate on append-only mode docs, #3504 + - installation: added Alpine Linux to distribution list, #4415 + - usage.html: only modify window.location when redirecting, #4133 + - add msgpack license to docs/3rd_party/msgpack +- vagrant / binary builds: + + - use python 3.5.7 for builds + - use osxfuse 3.8.3 + + +Version 1.1.9 (2019-02-10) +-------------------------- + +Fixes: + +- security fix: configure FUSE with "default_permissions", #3903 + "default_permissions" is now enforced by borg by default to let the + kernel check uid/gid/mode based permissions. + "ignore_permissions" can be given to not enforce "default_permissions". 
+- make "hostname" short, even on misconfigured systems, #4262 +- fix free space calculation on macOS (and others?), #4289 +- config: quit with error message when no key is provided, #4223 +- recover_segment: handle too small segment files correctly, #4272 +- correctly release memoryview, #4243 +- avoid diaper pattern in configparser by opening files, #4263 +- add "# cython: language_level=3" directive to .pyx files, #4214 +- info: consider part files for "This archive" stats, #3522 +- work around Microsoft WSL issue #645 (sync_file_range), #1961 + +New features: + +- add --rsh command line option to complement BORG_RSH env var, #1701 +- init: --make-parent-dirs parent1/parent2/repo_dir, #4235 + +Other: + +- add archive name to check --repair output, #3447 +- check for unsupported msgpack versions +- shell completions: + + - new shell completions for borg 1.1.9 + - more complete shell completions for borg mount -o + - added shell completions for borg help + - option arguments for zsh tab completion +- docs: + + - add FAQ regarding free disk space check, #3905 + - update BORG_PASSCOMMAND example and clarify variable expansion, #4249 + - FAQ regarding change of compression settings, #4222 + - add note about BSD flags to changelog, #4246 + - improve logging in example automation script + - add note about files changing during backup, #4081 + - work around the backslash issue, #4280 + - update release workflow using twine (docs, scripts), #4213 + - add warnings on repository copies to avoid future problems, #4272 +- tests: + + - fix the homebrew 1.9 issues on travis-ci, #4254 + - fix duplicate test method name, #4311 + - test_mount_hardlinks: get rid of fakeroot-caused test fails, #3389 + + +Version 1.1.8 (2018-12-09) +-------------------------- + +Fixes: + +- enforce storage quota if set by serve-command, #4093 +- invalid locations: give err msg containing parsed location, #4179 +- list repo: add placeholders for hostname and username, #4130 +- on linux, symlinks 
can't have ACLs, so don't try to set any, #4044 + +New features: + +- create: added PATH::archive output on INFO log level +- read a passphrase from a file descriptor specified in the + BORG_PASSPHRASE_FD environment variable. + +Other: + +- docs: + + - option --format is required for some expensive-to-compute values for json + + borg list by default does not compute expensive values except when + they are needed. whether they are needed is determined by the format, + in standard mode as well as in --json mode. + - tell that our binaries are x86/x64 amd/intel, bauerj has ARM + - fixed wrong archive name pattern in CRUD benchmark help + - fixed link to cachedir spec in docs, #4140 +- tests: + + - stop using fakeroot on travis, avoids sporadic EISDIR errors, #2482 + - xattr key names must start with "user." on linux + - fix code so flake8 3.6 does not complain + - explicitly convert environment variable to str, #4136 + - fix DeprecationWarning: Flags not at the start of the expression, #4137 + - support pytest4, #4172 +- vagrant: + + - use python 3.5.6 for builds + + +Version 1.1.7 (2018-08-11) +-------------------------- + +Compatibility notes: + +- added support for Python 3.7 + +Fixes: + +- cache lock: use lock_wait everywhere to fix infinite wait, see #3968 +- don't archive tagged dir when recursing an excluded dir, #3991 +- py37 argparse: work around bad default in py 3.7.0a/b/rc, #3996 +- py37 remove loggerDict.clear() from tearDown method, #3805 +- some fixes for bugs which likely did not result in problems in practice: + + - fixed logic bug in platform module API version check + - fixed xattr/acl function prototypes, added missing ones + +New features: + +- init: add warning to store both key and passphrase at safe place(s) +- BORG_HOST_ID env var to work around all-zero MAC address issue, #3985 +- borg debug dump-repo-objs --ghost (dump everything from segment files, + including deleted or superseded objects or commit tags) +- borg debug search-repo-objs 
(search in repo objects for hex bytes or strings) + +Other changes: + +- add Python 3.7 support +- updated shell completions +- call socket.gethostname only once +- locking: better logging, add some asserts +- borg debug dump-repo-objs: + + - filename layout improvements + - use repository.scan() to get on-disk order +- docs: + + - update installation instructions for macOS + - added instructions to install fuse via homebrew + - improve diff docs + - added note that checkpoints inside files requires 1.1+ + - add link to tempfile module + - remove row/column-spanning from docs source, #4000 #3990 +- tests: + + - fetch less data via os.urandom + - add py37 env for tox + - travis: add 3.7, remove 3.6-dev (we test with -dev in master) +- vagrant / binary builds: + + - use osxfuse 3.8.2 + - use own (uptodate) openindiana box + + +Version 1.1.6 (2018-06-11) +-------------------------- + +Compatibility notes: + +- 1.1.6 changes: + + - also allow msgpack-python 0.5.6. + +Fixes: + +- fix borg exception handling on ENOSPC error with xattrs, #3808 +- prune: fix/improve overall progress display +- borg config repo ... 
does not need cache/manifest/key, #3802 +- debug dump-repo-objs should not depend on a manifest obj +- pypi package: + + - include .coveragerc, needed by tox.ini + - fix package long description, #3854 + +New features: + +- mount: add uid, gid, umask mount options +- delete: + + - only commit once, #3823 + - implement --dry-run, #3822 +- check: + + - show progress while rebuilding missing manifest, #3787 + - more --repair output +- borg config --list , #3612 + +Other changes: + +- update msgpack requirement, #3753 +- update bundled zstd to 1.3.4, #3745 +- update bundled lz4 code to 1.8.2, #3870 +- docs: + + - describe what BORG_LIBZSTD_PREFIX does + - fix and deduplicate encryption quickstart docs, #3776 +- vagrant: + + - FUSE for macOS: upgrade 3.7.1 to 3.8.0 + - exclude macOS High Sierra upgrade on the darwin64 machine + - remove borgbackup.egg-info dir in fs_init (after rsync) + - use pyenv-based build/test on jessie32/62 + - use local 32 and 64bit debian jessie boxes + - use "vagrant" as username for new xenial box +- travis OS X: use xcode 8.3 (not broken) + + +Version 1.1.5 (2018-04-01) +-------------------------- + +Compatibility notes: + +- 1.1.5 changes: + + - require msgpack-python >= 0.4.6 and < 0.5.0. + 0.5.0+ dropped python 3.4 testing and also caused some other issues because + the python package was renamed to msgpack and emitted some FutureWarning. 
+ +Fixes: + +- create --list: fix that it was never showing M status, #3492 +- create: fix timing for first checkpoint (read files cache early, init + checkpoint timer after that), see #3394 +- extract: set rc=1 when extracting damaged files with all-zero replacement + chunks or with size inconsistencies, #3448 +- diff: consider an empty file as different to a non-existing file, #3688 +- files cache: improve exception handling, #3553 +- ignore exceptions in scandir_inorder() caused by an implicit stat(), + also remove unneeded sort, #3545 +- fixed tab completion problem where a space is always added after path even + when it shouldn't +- build: do .h file content checks in binary mode, fixes build issue for + non-ascii header files on pure-ascii locale platforms, #3544 #3639 +- borgfs: fix patterns/paths processing, #3551 +- config: add some validation, #3566 +- repository config: add validation for max_segment_size, #3592 +- set cache previous_location on load instead of save +- remove platform.uname() call which caused library mismatch issues, #3732 +- add exception handler around deprecated platform.linux_distribution() call +- use same datetime object for {now} and {utcnow}, #3548 + +New features: + +- create: implement --stdin-name, #3533 +- add chunker_params to borg archive info (--json) +- BORG_SHOW_SYSINFO=no to hide system information from exceptions + +Other changes: + +- updated zsh completions for borg 1.1.4 +- files cache related code cleanups +- be more helpful when parsing invalid --pattern values, #3575 +- be more clear in secure-erase warning message, #3591 +- improve getpass user experience, #3689 +- docs build: unicode problem fixed when using a py27-based sphinx +- docs: + + - security: explicitly note what happens OUTSIDE the attack model + - security: add note about combining compression and encryption + - security: describe chunk size / proximity issue, #3687 + - quickstart: add note about permissions, borg@localhost, #3452 + - quickstart: 
add introduction to repositories & archives, #3620 + - recreate --recompress: add missing metavar, clarify description, #3617 + - improve logging docs, #3549 + - add an example for --pattern usage, #3661 + - clarify path semantics when matching, #3598 + - link to offline documentation from README, #3502 + - add docs on how to verify a signed release with GPG, #3634 + - chunk seed is generated per repository (not: archive) + - better formatting of CPU usage documentation, #3554 + - extend append-only repo rollback docs, #3579 +- tests: + + - fix erroneously skipped zstd compressor tests, #3606 + - skip a test if argparse is broken, #3705 +- vagrant: + + - xenial64 box now uses username 'vagrant', #3707 + - move cleanup steps to fs_init, #3706 + - the boxcutter wheezy boxes are 404, use local ones + - update to Python 3.5.5 (for binary builds) + + +Version 1.1.4 (2017-12-31) +-------------------------- + +Compatibility notes: + +- When upgrading from borg 1.0.x to 1.1.x, please note: + + - read all the compatibility notes for 1.1.0*, starting from 1.1.0b1. + - borg upgrade: you do not need to and you also should not run it. + - borg might ask some security-related questions once after upgrading. + You can answer them either manually or via environment variable. + One known case is if you use unencrypted repositories, then it will ask + about an unknown unencrypted repository one time. + - your first backup with 1.1.x might be significantly slower (it might + completely read, chunk, hash a lot of files) - this is due to the + --files-cache mode change (and happens every time you change mode). + You can avoid the one-time slowdown by using the pre-1.1.0rc4-compatible + mode (but that is less safe for detecting changed files than the default). + See the --files-cache docs for details. +- borg 1.1.4 changes: + + - zstd compression is new in borg 1.1.4, older borg can't handle it. 
+ - new minimum requirements for the compression libraries - if the required + versions (header and lib) can't be found at build time, bundled code will + be used: + + - added requirement: libzstd >= 1.3.0 (bundled: 1.3.2) + - updated requirement: liblz4 >= 1.7.0 / r129 (bundled: 1.8.0) + +Fixes: + +- check: data corruption fix: fix for borg check --repair malfunction, #3444. + See the more detailed notes close to the top of this document. +- delete: also delete security dir when deleting a repo, #3427 +- prune: fix building the "borg prune" man page, #3398 +- init: use given --storage-quota for local repo, #3470 +- init: properly quote repo path in output +- fix startup delay with dns-only own fqdn resolving, #3471 + +New features: + +- added zstd compression. try it! +- added placeholder {reverse-fqdn} for fqdn in reverse notation +- added BORG_BASE_DIR environment variable, #3338 + +Other changes: + +- list help topics when invalid topic is requested +- fix lz4 deprecation warning, requires lz4 >= 1.7.0 (r129) +- add parens for C preprocessor macro argument usages (did not cause malfunction) +- exclude broken pytest 3.3.0 release +- updated fish/bash completions +- init: more clear exception messages for borg create, #3465 +- docs: + + - add auto-generated docs for borg config + - don't generate HTML docs page for borgfs, #3404 + - docs update for lz4 b2 zstd changes + - add zstd to compression help, readme, docs + - update requirements and install docs about bundled lz4 and zstd +- refactored build of the compress and crypto.low_level extensions, #3415: + + - move some lib/build related code to setup_{zstd,lz4,b2}.py + - bundle lz4 1.8.0 (requirement: >= 1.7.0 / r129) + - bundle zstd 1.3.2 (requirement: >= 1.3.0) + - blake2 was already bundled + - rename BORG_LZ4_PREFIX env var to BORG_LIBLZ4_PREFIX for better consistency: + we also have BORG_LIBB2_PREFIX and BORG_LIBZSTD_PREFIX now. 
+ - add prefer_system_lib* = True settings to setup.py - by default the build + will prefer a shared library over the bundled code, if library and headers + can be found and meet the minimum requirements. + + +Version 1.1.3 (2017-11-27) +-------------------------- + +Fixes: + +- Security Fix for CVE-2017-15914: Incorrect implementation of access controls + allows remote users to override repository restrictions in Borg servers. + A user able to access a remote Borg SSH server is able to circumvent access + controls post-authentication. + Affected releases: 1.1.0, 1.1.1, 1.1.2. Releases 1.0.x are NOT affected. +- crc32: deal with unaligned buffer, add tests - this broke borg on older ARM + CPUs that can not deal with unaligned 32bit memory accesses and raise a bus + error in such cases. the fix might also improve performance on some CPUs as + all 32bit memory accesses by the crc32 code are properly aligned now. #3317 +- mount: fixed support of --consider-part-files and do not show .borg_part_N + files by default in the mounted FUSE filesystem. #3347 +- fixed cache/repo timestamp inconsistency message, highlight that information + is obtained from security dir (deleting the cache will not bypass this error + in case the user knows this is a legitimate repo). +- borgfs: don't show sub-command in borgfs help, #3287 +- create: show an error when --dry-run and --stats are used together, #3298 + +New features: + +- mount: added exclusion group options and paths, #2138 + + Reused some code to support similar options/paths as borg extract offers - + making good use of these to only mount a smaller subset of dirs/files can + speed up mounting a lot and also will consume way less memory. + + borg mount [options] repo_or_archive mountpoint path [paths...] + + paths: you can just give some "root paths" (like for borg extract) to + only partially populate the FUSE filesystem. 
+ + new options: --exclude[-from], --pattern[s-from], --strip-components +- create/extract: support st_birthtime on platforms supporting it, #3272 +- add "borg config" command for querying/setting/deleting config values, #3304 + +Other changes: + +- clean up and simplify packaging (only package committed files, do not install + .c/.h/.pyx files) +- docs: + + - point out tuning options for borg create, #3239 + - add instructions for using ntfsclone, zerofree, #81 + - move image backup-related FAQ entries to a new page + - clarify key aliases for borg list --format, #3111 + - mention break-lock in checkpointing FAQ entry, #3328 + - document sshfs rename workaround, #3315 + - add FAQ about removing files from existing archives + - add FAQ about different prune policies + - usage and man page for borgfs, #3216 + - clarify create --stats duration vs. wall time, #3301 + - clarify encrypted key format for borg key export, #3296 + - update release checklist about security fixes + - document good and problematic option placements, fix examples, #3356 + - add note about using --nobsdflags to avoid speed penalty related to + bsdflags, #3239 + - move most of support section to www.borgbackup.org + + +Version 1.1.2 (2017-11-05) +-------------------------- + +Fixes: + +- fix KeyError crash when talking to borg server < 1.0.7, #3244 +- extract: set bsdflags last (include immutable flag), #3263 +- create: don't do stat() call on excluded-norecurse directory, fix exception + handling for stat() call, #3209 +- create --stats: do not count data volume twice when checkpointing, #3224 +- recreate: move chunks_healthy when excluding hardlink master, #3228 +- recreate: get rid of chunks_healthy when rechunking (does not match), #3218 +- check: get rid of already existing not matching chunks_healthy metadata, #3218 +- list: fix stdout broken pipe handling, #3245 +- list/diff: remove tag-file options (not used), #3226 + +New features: + +- bash, zsh and fish shell auto-completions, see 
scripts/shell_completions/ +- added BORG_CONFIG_DIR env var, #3083 + +Other changes: + +- docs: + + - clarify using a blank passphrase in keyfile mode + - mention "!" (exclude-norecurse) type in "patterns" help + - document to first heal before running borg recreate to re-chunk stuff, + because that will have to get rid of chunks_healthy metadata. + - more than 23 is not supported for CHUNK_MAX_EXP, #3115 + - borg does not respect nodump flag by default any more + - clarify same-filesystem requirement for borg upgrade, #2083 + - update / rephrase cygwin / WSL status, #3174 + - improve docs about --stats, #3260 +- vagrant: openindiana new clang package + +Already contained in 1.1.1 (last minute fix): + +- arg parsing: fix fallback function, refactor, #3205. This is a fixup + for #3155, which was broken on at least python <= 3.4.2. + + +Version 1.1.1 (2017-10-22) +-------------------------- + +Compatibility notes: + +- The deprecated --no-files-cache is not a global/common option any more, + but only available for borg create (it is not needed for anything else). + Use --files-cache=disabled instead of --no-files-cache. +- The nodump flag ("do not backup this file") is not honoured any more by + default because this functionality (esp. if it happened by error or + unexpected) was rather confusing and unexplainable at first to users. + If you want that "do not backup NODUMP-flagged files" behaviour, use: + borg create --exclude-nodump ... +- If you are on Linux and do not need bsdflags archived, consider using + ``--nobsdflags`` with ``borg create`` to avoid additional syscalls and + speed up backup creation. + +Fixes: + +- borg recreate: correctly compute part file sizes. fixes cosmetic, but + annoying issue as borg check complains about size inconsistencies of part + files in affected archives. you can solve that by running borg recreate on + these archives, see also #3157. 
+- bsdflags support: do not open BLK/CHR/LNK files, avoid crashes and + slowness, #3130 +- recreate: don't crash on attic archives w/o time_end, #3109 +- don't crash on repository filesystems w/o hardlink support, #3107 +- don't crash in first part of truncate_and_unlink, #3117 +- fix server-side IndexError crash with clients < 1.0.7, #3192 +- don't show traceback if only a global option is given, show help, #3142 +- cache: use SaveFile for more safety, #3158 +- init: fix wrong encryption choices in command line parser, fix missing + "authenticated-blake2", #3103 +- move --no-files-cache from common to borg create options, #3146 +- fix detection of non-local path (failed on ..filename), #3108 +- logging with fileConfig: set json attr on "borg" logger, #3114 +- fix crash with relative BORG_KEY_FILE, #3197 +- show excluded dir with "x" for tagged dirs / caches, #3189 + +New features: + +- create: --nobsdflags and --exclude-nodump options, #3160 +- extract: --nobsdflags option, #3160 + +Other changes: + +- remove annoying hardlinked symlinks warning, #3175 +- vagrant: use self-made FreeBSD 10.3 box, #3022 +- travis: don't brew update, hopefully fixes #2532 +- docs: + + - readme: -e option is required in borg 1.1 + - add example showing --show-version --show-rc + - use --format rather than --list-format (deprecated) in example + - update docs about hardlinked symlinks limitation + + +Version 1.1.0 (2017-10-07) +-------------------------- + +Compatibility notes: + +- borg command line: do not put options in between positional arguments + + This sometimes works (e.g. it worked in borg 1.0.x), but can easily stop + working if we make positional arguments optional (like it happened for + borg create's "paths" argument in 1.1). There are also places in borg 1.0 + where we do that, so it doesn't work there in general either. 
#3356 + + Good: borg create -v --stats repo::archive path + Good: borg create repo::archive path -v --stats + Bad: borg create repo::archive -v --stats path + +Fixes: + +- fix LD_LIBRARY_PATH restoration for subprocesses, #3077 +- "auto" compression: make sure expensive compression is actually better, + otherwise store lz4 compressed data we already computed. + +Other changes: + +- docs: + + - FAQ: we do not implement futile attempts of ETA / progress displays + - manpage: fix typos, update homepage + - implement simple "issue" role for manpage generation, #3075 + + +Version 1.1.0rc4 (2017-10-01) +----------------------------- + +Compatibility notes: + +- A borg server >= 1.1.0rc4 does not support borg clients 1.1.0b3-b5. #3033 +- The files cache is now controlled differently and has a new default mode: + + - the files cache now uses ctime by default for improved file change + detection safety. You can still use mtime for more speed and less safety. + - --ignore-inode is deprecated (use --files-cache=... without "inode") + - --no-files-cache is deprecated (use --files-cache=disabled) + +New features: + +- --files-cache - implement files cache mode control, #911 + You can now control the files cache mode using this option: + --files-cache={ctime,mtime,size,inode,rechunk,disabled} + (only some combinations are supported). See the docs for details. 
+ +Fixes: + +- remote progress/logging: deal with partial lines, #2637 +- remote progress: flush json mode output +- fix subprocess environments, #3050 (and more) + +Other changes: + +- remove client_supports_log_v3 flag, #3033 +- exclude broken Cython 0.27(.0) in requirements, #3066 +- vagrant: + + - upgrade to FUSE for macOS 3.7.1 + - use Python 3.5.4 to build the binaries +- docs: + + - security: change-passphrase only changes the passphrase, #2990 + - fixed/improved borg create --compression examples, #3034 + - add note about metadata dedup and --no[ac]time, #2518 + - twitter account @borgbackup now, better visible, #2948 + - simplified rate limiting wrapper in FAQ + + +Version 1.1.0rc3 (2017-09-10) +----------------------------- + +New features: + +- delete: support naming multiple archives, #2958 + +Fixes: + +- repo cleanup/write: invalidate cached FDs, #2982 +- fix datetime.isoformat() microseconds issues, #2994 +- recover_segment: use mmap(), lower memory needs, #2987 + +Other changes: + +- with-lock: close segment file before invoking subprocess +- keymanager: don't depend on optional readline module, #2976 +- docs: + + - fix macOS keychain integration command + - show/link new screencasts in README, #2936 + - document utf-8 locale requirement for json mode, #2273 +- vagrant: clean up shell profile init, user name, #2977 +- test_detect_attic_repo: don't test mount, #2975 +- add debug logging for repository cleanup + + +Version 1.1.0rc2 (2017-08-28) +----------------------------- + +Compatibility notes: + +- list: corrected mix-up of "isomtime" and "mtime" formats. Previously, + "isomtime" was the default but produced a verbose human format, + while "mtime" produced a ISO-8601-like format. + The behaviours have been swapped (so "mtime" is human, "isomtime" is ISO-like), + and the default is now "mtime". + "isomtime" is now a real ISO-8601 format ("T" between date and time, not a space). + +New features: + +- None. 
+ +Fixes: + +- list: fix weird mixup of mtime/isomtime +- create --timestamp: set start time, #2957 +- ignore corrupt files cache, #2939 +- migrate locks to child PID when daemonize is used +- fix exitcode of borg serve, #2910 +- only compare contents when chunker params match, #2899 +- umount: try fusermount, then try umount, #2863 + +Other changes: + +- JSON: use a more standard ISO 8601 datetime format, #2376 +- cache: write_archive_index: truncate_and_unlink on error, #2628 +- detect non-upgraded Attic repositories, #1933 +- delete various nogil and threading related lines +- coala / pylint related improvements +- docs: + + - renew asciinema/screencasts, #669 + - create: document exclusion through nodump, #2949 + - minor formatting fixes + - tar: tarpipe example + - improve "with-lock" and "info" docs, #2869 + - detail how to use macOS/GNOME/KDE keyrings for repo passwords, #392 +- travis: only short-circuit docs-only changes for pull requests +- vagrant: + + - netbsd: bash is already installed + - fix netbsd version in PKG_PATH + - add exe location to PATH when we build an exe + + Version 1.1.0rc1 (2017-07-24) ----------------------------- @@ -1035,7 +2200,7 @@ Other changes: - pass meta-data around, #765 - move some constants to new constants module - - better readability and less errors with namedtuples, #823 + - better readability and fewer errors with namedtuples, #823 - moved source tree into src/ subdirectory, #1016 - made borg.platform a package, #1113 - removed dead crypto code, #1032 @@ -1119,8 +2284,8 @@ Bug fixes: Fixes a chmod/chown/chgrp/unlink/rename/... crash race between getting dirents and dispatching to process_symlink. - yes(): abort on wrong answers, saying so, #1622 -- fixed exception borg serve raised when connection was closed before reposiory - was openend. add an error message for this. +- fixed exception borg serve raised when connection was closed before repository + was opened. Add an error message for this. 
- fix read-from-closed-FD issue, #1551 (this seems not to get triggered in 1.0.x, but was discovered in master) - hashindex: fix iterators (always raise StopIteration when exhausted) @@ -1623,7 +2788,7 @@ Bug fixes: - do not sleep for >60s while waiting for lock, #773 - unpack file stats before passing to FUSE - fix build on illumos -- don't try to backup doors or event ports (Solaris and derivates) +- don't try to backup doors or event ports (Solaris and derivatives) - remove useless/misleading libc version display, #738 - test suite: reset exit code of persistent archiver, #844 - RemoteRepository: clean up pipe if remote open() fails @@ -2401,7 +3566,7 @@ Version 0.23.0 (2015-06-11) Incompatible changes (compared to attic, fork related): - changed sw name and cli command to "borg", updated docs -- package name (and name in urls) uses "borgbackup" to have less collisions +- package name (and name in urls) uses "borgbackup" to have fewer collisions - changed repo / cache internal magic strings from ATTIC* to BORG*, changed cache location to .cache/borg/ - this means that it currently won't accept attic repos (see issue #21 about improving that) diff --git a/docs/conf.py b/docs/conf.py index 283a2c7ac9..acfb00d66a 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -42,7 +42,7 @@ # General information about the project. project = 'Borg - Deduplicating Archiver' -copyright = '2010-2014 Jonas Borgström, 2015-2017 The Borg Collective (see AUTHORS file)' +copyright = u'2010-2014 Jonas Borgström, 2015-2021 The Borg Collective (see AUTHORS file)' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/docs/deployment.rst b/docs/deployment.rst index 7b1caf9255..3d4605b2bb 100644 --- a/docs/deployment.rst +++ b/docs/deployment.rst @@ -12,3 +12,5 @@ This chapter details deployment strategies for the following scenarios. 
deployment/central-backup-server deployment/hosting-repositories deployment/automated-local + deployment/image-backup + deployment/pull-backup diff --git a/docs/deployment/automated-local.rst b/docs/deployment/automated-local.rst index a64cec2352..cbc39e5cc3 100644 --- a/docs/deployment/automated-local.rst +++ b/docs/deployment/automated-local.rst @@ -137,7 +137,6 @@ modify it to suit your needs (e.g. more backup sets, dumping databases etc.). # This is just an example, change it however you see fit borg create $BORG_OPTS \ --exclude /root/.cache \ - --exclude /var/cache \ --exclude /var/lib/docker/devicemapper \ $TARGET::$DATE-$$-system \ / /boot diff --git a/docs/deployment/central-backup-server.rst b/docs/deployment/central-backup-server.rst index 26e718985e..431bd531e9 100644 --- a/docs/deployment/central-backup-server.rst +++ b/docs/deployment/central-backup-server.rst @@ -91,16 +91,22 @@ Client The client needs to initialize the `pictures` repository like this: +:: + borg init backup@backup01.srv.local:pictures Or with the full path (should actually never be used, as only for demonstrational purposes). The server should automatically change the current working directory to the `` folder. +:: + borg init backup@backup01.srv.local:/home/backup/repos/johndoe.clnt.local/pictures When `johndoe.clnt.local` tries to access a not restricted path the following error is raised. John Doe tries to backup into the Web 01 path: +:: + borg init backup@backup01.srv.local:/home/backup/repos/web01.srv.local/pictures :: @@ -114,13 +120,7 @@ Ansible ------- Ansible takes care of all the system-specific commands to add the user, create the -folder. Even when the configuration is changed the repository server configuration is -satisfied and reproducible. - -Automate setting up an repository server with the user, group, folders and -permissions a Ansible playbook could be used. 
Keep in mind the playbook -uses the Arch Linux `pacman `_ -package manager to install and keep borg up-to-date. +folder, install and configure software. :: @@ -138,7 +138,7 @@ package manager to install and keep borg up-to-date. - host: app01.clnt.local key: "{{ lookup('file', '/path/to/keys/app01.clnt.local.pub') }}" tasks: - - pacman: name=borg state=latest update_cache=yes + - package: name=borg state=present - group: name="{{ group }}" state=present - user: name="{{ user }}" shell=/bin/bash home="{{ home }}" createhome=yes group="{{ group }}" groups= state=present - file: path="{{ home }}" owner="{{ user }}" group="{{ group }}" mode=0700 state=directory @@ -171,8 +171,6 @@ Salt running on a Debian system. - openssl - libacl1-dev - libacl1 - - liblz4-dev - - liblz4-1 - build-essential - libfuse-dev - fuse @@ -216,6 +214,6 @@ and no other interpreter or application has to be deployed. See also -------- -* `SSH Daemon manpage `_ +* `SSH Daemon manpage `_ * `Ansible `_ * `Salt `_ diff --git a/docs/deployment/hosting-repositories.rst b/docs/deployment/hosting-repositories.rst index 6e3161ee50..9a2eacd038 100644 --- a/docs/deployment/hosting-repositories.rst +++ b/docs/deployment/hosting-repositories.rst @@ -1,5 +1,6 @@ .. include:: ../global.rst.inc .. highlight:: none +.. _hosting_repositories: Hosting repositories ==================== @@ -42,13 +43,13 @@ SSH access to safe operations only. with no control characters and that it consists of three parts separated by a single space. Ensure that no newlines are contained within the key. -The `restrict` keyword enables all restrictions, i.e. disables port, agent +The ``restrict`` keyword enables all restrictions, i.e. disables port, agent and X11 forwarding, as well as disabling PTY allocation and execution of ~/.ssh/rc. If any future restriction capabilities are added to authorized_keys files they will be included in this set. -The `command` keyword forces execution of the specified command line -upon login. 
This must be ``borg serve``. The `--restrict-to-repository` +The ``command`` keyword forces execution of the specified command line +upon login. This must be ``borg serve``. The ``--restrict-to-repository`` option permits access to exactly **one** repository. It can be given multiple times to permit access to more than one repository. @@ -69,5 +70,15 @@ support storage quotas. Refer to :ref:`internals_storage_quota` for more details on storage quotas. -Refer to the `sshd(8) `_ +**Specificities: Append-only repositories** + +Running ``borg init`` via a ``borg serve --append-only`` server will **not** +create a repository that is configured to be append-only by its repository +config. + +But, ``--append-only`` arguments in ``authorized_keys`` will override the +repository config, therefore append-only mode can be enabled on a key by key +basis. + +Refer to the `sshd(8) `_ man page for more details on SSH options. diff --git a/docs/deployment/image-backup.rst b/docs/deployment/image-backup.rst new file mode 100644 index 0000000000..fd684c3f2d --- /dev/null +++ b/docs/deployment/image-backup.rst @@ -0,0 +1,119 @@ +.. include:: ../global.rst.inc +.. highlight:: none + +Backing up entire disk images +============================= + +Backing up disk images can still be efficient with Borg because its `deduplication`_ +technique makes sure only the modified parts of the file are stored. Borg also has +optional simple sparse file support for extract. + +Decreasing the size of image backups +------------------------------------ + +Disk images are as large as the full disk when uncompressed and might not get much +smaller post-deduplication after heavy use because virtually all file systems don't +actually delete file data on disk but instead delete the filesystem entries referencing +the data. Therefore, if a disk nears capacity and files are deleted again, the change +will barely decrease the space it takes up when compressed and deduplicated. 
Depending +on the filesystem, there are several ways to decrease the size of a disk image: + +Using ntfsclone (NTFS, i.e. Windows VMs) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``ntfsclone`` can only operate on filesystems with the journal cleared (i.e. turned-off +machines), which somewhat limits its utility in the case of VM snapshots. However, when +it can be used, its special image format is even more efficient than just zeroing and +deduplicating. For backup, save the disk header and the contents of each partition:: + + HEADER_SIZE=$(sfdisk -lo Start $DISK | grep -A1 -P 'Start$' | tail -n1 | xargs echo) + PARTITIONS=$(sfdisk -lo Device,Type $DISK | sed -e '1,/Device\s*Type/d') + dd if=$DISK count=$HEADER_SIZE | borg create repo::hostname-partinfo - + echo "$PARTITIONS" | grep NTFS | cut -d' ' -f1 | while read x; do + PARTNUM=$(echo $x | grep -Eo "[0-9]+$") + ntfsclone -so - $x | borg create repo::hostname-part$PARTNUM - + done + # to backup non-NTFS partitions as well: + echo "$PARTITIONS" | grep -v NTFS | cut -d' ' -f1 | while read x; do + PARTNUM=$(echo $x | grep -Eo "[0-9]+$") + borg create --read-special repo::hostname-part$PARTNUM $x + done + +Restoration is a similar process:: + + borg extract --stdout repo::hostname-partinfo | dd of=$DISK && partprobe + PARTITIONS=$(sfdisk -lo Device,Type $DISK | sed -e '1,/Device\s*Type/d') + borg list --format {archive}{NL} repo | grep 'part[0-9]*$' | while read x; do + PARTNUM=$(echo $x | grep -Eo "[0-9]+$") + PARTITION=$(echo "$PARTITIONS" | grep -E "$DISKp?$PARTNUM" | head -n1) + if echo "$PARTITION" | cut -d' ' -f2- | grep -q NTFS; then + borg extract --stdout repo::$x | ntfsclone -rO $(echo "$PARTITION" | cut -d' ' -f1) - + else + borg extract --stdout repo::$x | dd of=$(echo "$PARTITION" | cut -d' ' -f1) + fi + done + +.. 
note:: + + When backing up a disk image (as opposed to a real block device), mount it as + a loopback image to use the above snippets:: + + DISK=$(losetup -Pf --show /path/to/disk/image) + # do backup as shown above + losetup -d $DISK + +Using zerofree (ext2, ext3, ext4) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +``zerofree`` works similarly to ntfsclone in that it zeros out unused chunks of the FS, +except it works in place, zeroing the original partition. This makes the backup process +a bit simpler:: + + sfdisk -lo Device,Type $DISK | sed -e '1,/Device\s*Type/d' | grep Linux | cut -d' ' -f1 | xargs -n1 zerofree + borg create --read-special repo::hostname-disk $DISK + +Because the partitions were zeroed in place, restoration is only one command:: + + borg extract --stdout repo::hostname-disk | dd of=$DISK + +.. note:: The "traditional" way to zero out space on a partition, especially one already + mounted, is to simply ``dd`` from ``/dev/zero`` to a temporary file and delete + it. This is ill-advised for the reasons mentioned in the ``zerofree`` man page: + + - it is slow + - it makes the disk image (temporarily) grow to its maximal extent + - it (temporarily) uses all free space on the disk, so other concurrent write actions may fail. + +Virtual machines +---------------- + +If you use non-snapshotting backup tools like Borg to back up virtual machines, then +the VMs should be turned off for the duration of the backup. Backing up live VMs can +(and will) result in corrupted or inconsistent backup contents: a VM image is just a +regular file to Borg with the same issues as regular files when it comes to concurrent +reading and writing from the same file. + +For backing up live VMs use filesystem snapshots on the VM host, which establishes +crash-consistency for the VM images. This means that with most file systems (that +are journaling) the FS will always be fine in the backup (but may need a journal +replay to become accessible). 
+ +Usually this does not mean that file *contents* on the VM are consistent, since file +contents are normally not journaled. Notable exceptions are ext4 in data=journal mode, +ZFS and btrfs (unless nodatacow is used). + +Applications designed with crash-consistency in mind (most relational databases like +PostgreSQL, SQLite etc. but also for example Borg repositories) should always be able +to recover to a consistent state from a backup created with crash-consistent snapshots +(even on ext4 with data=writeback or XFS). Other applications may require a lot of work +to reach application-consistency; it's a broad and complex issue that cannot be explained +in entirety here. + +Hypervisor snapshots capturing most of the VM's state can also be used for backups and +can be a better alternative to pure file system based snapshots of the VM's disk, since +no state is lost. Depending on the application this can be the easiest and most reliable +way to create application-consistent backups. + +Borg doesn't intend to address these issues due to their huge complexity and +platform/software dependency. Combining Borg with the mechanisms provided by the platform +(snapshots, hypervisor features) will be the best approach to start tackling them. \ No newline at end of file diff --git a/docs/deployment/pull-backup.rst b/docs/deployment/pull-backup.rst new file mode 100644 index 0000000000..d923cf2f37 --- /dev/null +++ b/docs/deployment/pull-backup.rst @@ -0,0 +1,282 @@ +.. include:: ../global.rst.inc +.. highlight:: none +.. _pull_backup: + +Backing up in pull mode +======================= + +Assuming you have a pull backup system set up with borg, where a backup server +pulls the data from the target via SSHFS. In this mode, the backup client's file +system is mounted remotely on the backup server. Pull mode is even possible if +the SSH connection must be established by the client via a remote tunnel. 
Other
+network file systems like NFS or SMB could be used as well, but SSHFS is very
+simple to set up and probably the most secure one.
+
+There are some restrictions caused by SSHFS. For example, unless you define UID
+and GID mappings when mounting via ``sshfs``, owners and groups of the mounted
+file system will probably change, and you may not have access to those files if
+BorgBackup is not run with root privileges.
+
+SSHFS is a FUSE file system and uses the SFTP protocol, so there may also be
+other unsupported features that the actual implementations of sshfs, libfuse and
+sftp on the backup server do not support, like file name encodings, ACLs, xattrs
+or bsdflags. So there is no guarantee that you are able to restore a system
+completely in every aspect from such a backup.
+
+.. warning::
+
+    To mount the client's root file system you will need root access to the
+    client. This contradicts the usual threat model of BorgBackup, where
+    clients don't need to trust the backup server (data is encrypted). In pull
+    mode the server (when logged in as root) could cause unlimited damage to the
+    client. Therefore, pull mode should be used only from servers you fully
+    trust!
+
+Creating a backup
+-----------------
+
+Generally, in a pull backup situation there is no direct way for borg to know
+the client's original UID:GID name mapping of files, because Borg would use
+``/etc/passwd`` and ``/etc/group`` of the backup server to map the names. To
+derive the right names, Borg needs to have access to the client's passwd and
+group files and use them in the backup process.
+
+The solution to this problem is chrooting into an sshfs mounted directory. In
+this example the whole client root file system is mounted. We use the
+stand-alone BorgBackup executable and copy it into the mounted file system to
+make Borg available after entering chroot; this can be skipped if Borg is
+already installed on the client.
+
+::
+
+    # Mount client root file system.
+ mkdir /tmp/sshfs + sshfs root@host:/ /tmp/sshfs + # Mount BorgBackup repository inside it. + mkdir /tmp/sshfs/borgrepo + mount --bind /path/to/repo /tmp/sshfs/borgrepo + # Make borg executable available. + cp /usr/local/bin/borg /tmp/sshfs/usr/local/bin/borg + # Mount important system directories and enter chroot. + cd /tmp/sshfs + for i in dev proc sys; do mount --bind /$i $i; done + chroot /tmp/sshfs + +Now we are on the backup system but inside a chroot with the client's root file +system. We have a copy of Borg binary in ``/usr/local/bin`` and the repository +in ``/borgrepo``. Borg will back up the client's user/group names, and we can +create the backup, retaining the original paths, excluding the repository: + +:: + + borg create --exclude /borgrepo --files-cache ctime,size /borgrepo::archive / + +For the sake of simplicity only ``/borgrepo`` is excluded here. You may want to +set up an exclude file with additional files and folders to be excluded. Also +note that we have to modify Borg's file change detection behaviour – SSHFS +cannot guarantee stable inode numbers, so we have to supply the +``--files-cache`` option. + +Finally, we need to exit chroot, unmount all the stuff and clean up: + +:: + + exit # exit chroot + rm /tmp/sshfs/usr/local/bin/borg + cd /tmp/sshfs + for i in dev proc sys borgrepo; do umount ./$i; done + rmdir borgrepo + cd ~ + umount /tmp/sshfs + rmdir /tmp/sshfs + +Thanks to secuser on IRC for this how-to! + +Restore methods +--------------- + +The counterpart of a pull backup is a push restore. Depending on the type of +restore – full restore or partial restore – there are different methods to make +sure the correct IDs are restored. + +Partial restore +~~~~~~~~~~~~~~~ + +In case of a partial restore, using the archived UIDs/GIDs might lead to wrong +results if the name-to-ID mapping on the target system has changed compared to +backup time (might be the case e.g. for a fresh OS install). 
+ +The workaround again is chrooting into an sshfs mounted directory, so Borg is +able to map the user/group names of the backup files to the actual IDs on the +client. This example is similar to the backup above – only the Borg command is +different: + +:: + + # Mount client root file system. + mkdir /tmp/sshfs + sshfs root@host:/ /tmp/sshfs + # Mount BorgBackup repository inside it. + mkdir /tmp/sshfs/borgrepo + mount --bind /path/to/repo /tmp/sshfs/borgrepo + # Make borg executable available. + cp /usr/local/bin/borg /tmp/sshfs/usr/local/bin/borg + # Mount important system directories and enter chroot. + cd /tmp/sshfs + for i in dev proc sys; do mount --bind /$i $i; done + chroot /tmp/sshfs + +Now we can run + +:: + + borg extract /borgrepo::archive PATH + +to partially restore whatever we like. Finally, do the clean-up: + +:: + + exit # exit chroot + rm /tmp/sshfs/usr/local/bin/borg + cd /tmp/sshfs + for i in dev proc sys borgrepo; do umount ./$i; done + rmdir borgrepo + cd ~ + umount /tmp/sshfs + rmdir /tmp/sshfs + +Full restore +~~~~~~~~~~~~ + +When doing a full restore, we restore all files (including the ones containing +the ID-to-name mapping, ``/etc/passwd`` and ``/etc/group``). Everything will be +consistent automatically if we restore the numeric IDs stored in the archive. 
So
+there is no need for a chroot environment; we just mount the client file system
+and extract a backup, utilizing the ``--numeric-owner`` option:
+
+::
+
+    sshfs root@host:/ /mnt/sshfs
+    cd /mnt/sshfs
+    borg extract --numeric-owner /path/to/repo::archive
+    cd ~
+    umount /mnt/sshfs
+
+Simple (lossy) full restore
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using ``borg export-tar`` it is possible to stream a backup to the client and
+directly extract it without the need to mount with SSHFS:
+
+::
+
+    borg export-tar /path/to/repo::archive - | ssh root@host 'tar -C / -x'
+
+Note that in this scenario the tar format is the limiting factor – it cannot
+restore all the advanced features that BorgBackup supports. See
+:ref:`borg_export-tar` for limitations.
+
+ssh-agent
+=========
+
+In this scenario *borg-server* initiates an SSH connection to *borg-client* and forwards the authentication
+agent connection.
+
+After that, it works similarly to the push mode:
+*borg-client* initiates another SSH connection back to *borg-server* using the forwarded authentication agent
+connection to authenticate itself, starts ``borg serve`` and communicates with it.
+
+Using this method requires ssh access of user *borgs* to *borgc@borg-client*, where:
+
+* *borgs* is the user on the server side with read/write access to the local borg repository.
+* *borgc* is the user on the client side with read access to files meant to be backed up.
+
+Applying this method for automated backup operations
+----------------------------------------------------
+
+Assume that the borg-client host is untrusted.
+Therefore we make some effort to prevent a hostile user on the borg-client side from doing something harmful.
+In case of a fully trusted borg-client the method could be simplified.
+ +Preparing the server side +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Do this once for each client on *borg-server* to allow *borgs* to connect itself on *borg-server* using a +dedicated ssh key: + +:: + + borgs@borg-server$ install -m 700 -d ~/.ssh/ + borgs@borg-server$ ssh-keygen -N '' -t rsa -f ~/.ssh/borg-client_key + borgs@borg-server$ { echo -n 'command="borg serve --append-only --restrict-to-repo ~/repo",restrict '; cat ~/.ssh/borg-client_key.pub; } >> ~/.ssh/authorized_keys + borgs@borg-server$ chmod 600 ~/.ssh/authorized_keys + +``install -m 700 -d ~/.ssh/`` + + Create directory ~/.ssh with correct permissions if it does not exist yet. + +``ssh-keygen -N '' -t rsa -f ~/.ssh/borg-client_key`` + + Create an ssh key dedicated to communication with borg-client. + +.. note:: + Another more complex approach is using a unique ssh key for each pull operation. + This is more secure as it guarantees that the key will not be used for other purposes. + +``{ echo -n 'command="borg serve --append-only --restrict-to-repo ~/repo",restrict '; cat ~/.ssh/borg-client_key.pub; } >> ~/.ssh/authorized_keys`` + + Add borg-client's ssh public key to ~/.ssh/authorized_keys with forced command and restricted mode. + The borg client is restricted to use one repo at the specified path and to append-only operation. + Commands like *delete*, *prune* and *compact* have to be executed another way, for example directly on *borg-server* + side or from a privileged, less restricted client (using another authorized_keys entry). + +``chmod 600 ~/.ssh/authorized_keys`` + + Fix permissions of ~/.ssh/authorized_keys. + +Pull operation +~~~~~~~~~~~~~~ + +Initiating borg command execution from *borg-server* (e.g. 
init):: + + borgs@borg-server$ ( + eval $(ssh-agent) > /dev/null + ssh-add -q ~/.ssh/borg-client_key + echo 'your secure borg key passphrase' | \ + ssh -A -o StrictHostKeyChecking=no borgc@borg-client "BORG_PASSPHRASE=\$(cat) borg --rsh 'ssh -o StrictHostKeyChecking=no' init --encryption repokey ssh://borgs@borg-server/~/repo" + kill "${SSH_AGENT_PID}" + ) + +Parentheses around commands are needed to avoid interference with a possibly already running ssh-agent. +Parentheses are not needed when using a dedicated bash process. + +``eval $(ssh-agent) > /dev/null`` + + Run the SSH agent in the background and export related environment variables to the current bash session. + +``ssh-add -q ~/.ssh/borg-client_key`` + + Load the SSH private key dedicated to communication with the borg-client into the SSH agent. + Look at ``man 1 ssh-add`` for a more detailed explanation. + +.. note:: + Care needs to be taken when loading keys into the SSH agent. Users on the *borg-client* having read/write permissions + to the agent's UNIX-domain socket (at least borgc and root in our case) can access the agent on *borg-server* through + the forwarded connection and can authenticate using any of the identities loaded into the agent + (look at ``man 1 ssh`` for more detailed explanation). Therefore there are some security considerations: + + * Private keys loaded into the agent must not be used to enable access anywhere else. + * The keys meant to be loaded into the agent must be specified explicitly, not from default locations. + * The *borg-client*'s entry in *borgs@borg-server:~/.ssh/authorized_keys* must be as restrictive as possible. + +``echo 'your secure borg key passphrase' | ssh -A -o StrictHostKeyChecking=no borgc@borg-client "BORG_PASSPHRASE=\$(cat) borg --rsh 'ssh -o StrictHostKeyChecking=no' init --encryption repokey ssh://borgs@borg-server/~/repo"`` + + Run the *borg init* command on *borg-client*. 
+ + *ssh://borgs@borg-server/~/repo* refers to the repository *repo* within borgs's home directory on *borg-server*. + + *StrictHostKeyChecking=no* is used to automatically add host keys to *~/.ssh/known_hosts* without user intervention. + +``kill "${SSH_AGENT_PID}"`` + + Kill ssh-agent with loaded keys when it is not needed anymore. diff --git a/docs/development.rst b/docs/development.rst index 37cc9ad68d..48c531cbc8 100644 --- a/docs/development.rst +++ b/docs/development.rst @@ -17,59 +17,59 @@ Contributions Some guidance for contributors: -- discuss about changes on github issue tracker, IRC or mailing list +- Discuss changes on the GitHub issue tracker, on IRC or on the mailing list. -- make your PRs on the ``master`` branch (see `Branching Model`_ for details) +- Make your PRs on the ``master`` branch (see `Branching Model`_ for details). -- do clean changesets: +- Do clean changesets: - - focus on some topic, resist changing anything else. - - do not do style changes mixed with functional changes. - - try to avoid refactorings mixed with functional changes. - - if you need to fix something after commit/push: + - Focus on some topic, resist changing anything else. + - Do not do style changes mixed with functional changes. + - Try to avoid refactorings mixed with functional changes. + - If you need to fix something after commit/push: - - if there are ongoing reviews: do a fixup commit you can - merge into the bad commit later. - - if there are no ongoing reviews or you did not push the - bad commit yet: edit the commit to include your fix or + - If there are ongoing reviews: do a fixup commit you can + squash into the bad commit later. + - If there are no ongoing reviews or you did not push the + bad commit yet: amend the commit to include your fix or merge the fixup commit before pushing. 
- - have a nice, clear, typo-free commit comment - - if you fixed an issue, refer to it in your commit comment - - follow the style guide (see below) + - Have a nice, clear, typo-free commit comment. + - If you fixed an issue, refer to it in your commit comment. + - Follow the style guide (see below). -- if you write new code, please add tests and docs for it +- If you write new code, please add tests and docs for it. -- run the tests, fix anything that comes up +- Run the tests, fix any issues that come up. -- make a pull request on github +- Make a pull request on GitHub. -- wait for review by other developers +- Wait for review by other developers. Branching model --------------- Borg development happens on the ``master`` branch and uses GitHub pull requests (if you don't have GitHub or don't want to use it you can -send smaller patches via the borgbackup :ref:`mailing_list` to the maintainers). +send smaller patches via the borgbackup mailing list to the maintainers). -Stable releases are maintained on maintenance branches named x.y-maint, eg. -the maintenance branch of the 1.0.x series is 1.0-maint. +Stable releases are maintained on maintenance branches named ``x.y-maint``, eg. +the maintenance branch of the 1.0.x series is ``1.0-maint``. -Most PRs should be made against the ``master`` branch. Only if an +Most PRs should be filed against the ``master`` branch. Only if an issue affects **only** a particular maintenance branch a PR should be -made against it directly. +filed against it directly. While discussing / reviewing a PR it will be decided whether the -change should be applied to maintenance branch(es). Each maintenance +change should be applied to maintenance branches. Each maintenance branch has a corresponding *backport/x.y-maint* label, which will then be applied. 
Changes that are typically considered for backporting: -- Data loss, corruption and inaccessibility fixes -- Security fixes -- Forward-compatibility improvements -- Documentation corrections +- Data loss, corruption and inaccessibility fixes. +- Security fixes. +- Forward-compatibility improvements. +- Documentation corrections. .. rubric:: Maintainer part @@ -113,7 +113,7 @@ troublesome due to merges growing more conflict-heavy and error-prone. Code and issues --------------- -Code is stored on Github, in the `Borgbackup organization +Code is stored on GitHub, in the `Borgbackup organization `_. `Issues `_ and `pull requests `_ should be sent there as @@ -131,13 +131,13 @@ you run the tests. Continuous Integration ---------------------- -All pull requests go through Travis-CI_, which runs the tests on Linux +All pull requests go through `GitHub Actions`_, which runs the tests on Linux and Mac OS X as well as the flake8 style checker. Windows builds run on AppVeyor_, while additional Unix-like platforms are tested on Golem_. .. _AppVeyor: https://ci.appveyor.com/project/borgbackup/borg/ .. _Golem: https://golem.enkore.de/view/Borg/ -.. _Travis-CI: https://travis-ci.org/borgbackup/borg +.. _GitHub Actions: https://github.com/borgbackup/borg/actions Output and Logging ------------------ @@ -156,7 +156,7 @@ controlled by that flag. See ``_setup_implied_logging()`` in Building a development environment ---------------------------------- -First, just install borg into a virtual env as described before. +First, just install borg into a virtual env :ref:`as described before `. 
To install some additional packages needed for running the tests, activate your virtual env and run:: @@ -182,7 +182,7 @@ Some more advanced examples:: # verify a changed tox.ini (run this after any change to tox.ini): fakeroot -u tox --recreate - fakeroot -u tox -e py34 # run all tests, but only on python 3.4 + fakeroot -u tox -e py37 # run all tests, but only on python 3.7 fakeroot -u tox borg.testsuite.locking # only run 1 test module @@ -200,16 +200,27 @@ Running more checks using coala First install coala and some checkers ("bears"): +:: + pip install -r requirements.d/coala.txt You can now run coala from the toplevel directory; it will read its settings from ``.coafile`` there: +:: + coala Some bears have additional requirements and they usually tell you about them in case they are missing. + +Adding a compression algorithm +------------------------------ + +If you want to add a new compression algorithm, please refer to :issue:`1633` +and leave a post there in order to discuss about the proposal. + Documentation ------------- @@ -222,7 +233,7 @@ parsers declared in the program and their documentation, which is embedded in the program (see archiver.py). These are committed to git for easier use by packagers downstream. -When a command is added, a commandline flag changed, added or removed, +When a command is added, a command line flag changed, added or removed, the usage docs need to be rebuilt as well:: python setup.py build_usage @@ -307,47 +318,60 @@ Creating a new release Checklist: -- make sure all issues for this milestone are closed or moved to the - next milestone -- find and fix any low hanging fruit left on the issue tracker -- check that Travis CI is happy -- update ``CHANGES.rst``, based on ``git log $PREVIOUS_RELEASE..`` -- check version number of upcoming release in ``CHANGES.rst`` -- verify that ``MANIFEST.in`` and ``setup.py`` are complete +- Make sure all issues for this milestone are closed or moved to the + next milestone. 
+- Check if there are any pending fixes for security issues. +- Find and fix any low hanging fruit left on the issue tracker. +- Check that GitHub Actions CI is happy. +- Update ``CHANGES.rst``, based on ``git log $PREVIOUS_RELEASE..``. +- Check version number of upcoming release in ``CHANGES.rst``. +- Render ``CHANGES.rst`` via ``make html`` and check for markup errors. +- Verify that ``MANIFEST.in`` and ``setup.py`` are complete. - ``python setup.py build_usage ; python setup.py build_man`` and - commit (be sure to build with Python 3.4 or 3.5 as Python 3.6 added `more + commit (be sure to build with Python 3.5 as Python 3.6 added `more guaranteed hashing algorithms - `_) -- tag the release:: + `_). +- Tag the release:: git tag -s -m "tagged/signed release X.Y.Z" X.Y.Z -- create a clean repo and use it for the following steps:: +- Create a clean repo and use it for the following steps:: git clone borg borg-clean This makes sure no uncommitted files get into the release archive. - It also will find if you forgot to commit something that is needed. - It also makes sure the vagrant machines only get committed files and + It will also reveal uncommitted required files. + Moreover, it makes sure the vagrant machines only get committed files and do a fresh start based on that. -- run tox and/or binary builds on all supported platforms via vagrant, - check for test failures -- create a release on PyPi:: +- Run tox and/or binary builds on all supported platforms via vagrant, + check for test failures. +- Create sdist, sign it, upload release to (test) PyPi: + + :: + + scripts/sdist-sign X.Y.Z + scripts/upload-pypi X.Y.Z test + scripts/upload-pypi X.Y.Z +- Put binaries into dist/borg-OSNAME and sign them: - python setup.py register sdist upload --identity="Thomas Waldmann" --sign + :: -- close release milestone on Github -- announce on: + scripts/sign-binaries 201912312359 +- Close the release milestone on GitHub. 
+- `Update borgbackup.org + `_ with the + new version number and release date. +- Announce on: - - Mailing list - - Twitter (follow @ThomasJWaldmann for these tweets) - - IRC channel (change ``/topic``) + - Mailing list. + - Twitter. + - IRC channel (change ``/topic``). -- create a Github release, include: +- Create a GitHub release, include: - * standalone binaries (see above for how to create them) + * Standalone binaries (see above for how to create them). - + for OS X, document the OS X Fuse version in the README of the binaries. + + For OS X, document the OS X Fuse version in the README of the binaries. OS X FUSE uses a kernel extension that needs to be compatible with the code contained in the binary. - * a link to ``CHANGES.rst`` + * A link to ``CHANGES.rst``. diff --git a/docs/faq.rst b/docs/faq.rst index fe5700cfa8..661b3414aa 100644 --- a/docs/faq.rst +++ b/docs/faq.rst @@ -8,50 +8,32 @@ Frequently asked questions Usage & Limitations ################### -Can I backup VM disk images? ----------------------------- - -Yes, the `deduplication`_ technique used by -|project_name| makes sure only the modified parts of the file are stored. -Also, we have optional simple sparse file support for extract. - -If you use non-snapshotting backup tools like Borg to back up virtual machines, -then the VMs should be turned off for the duration of the backup. Backing up live VMs can (and will) -result in corrupted or inconsistent backup contents: a VM image is just a regular file to -Borg with the same issues as regular files when it comes to concurrent reading and writing from -the same file. - -For backing up live VMs use file system snapshots on the VM host, which establishes -crash-consistency for the VM images. This means that with most file systems -(that are journaling) the FS will always be fine in the backup (but may need a -journal replay to become accessible). 
- -Usually this does not mean that file *contents* on the VM are consistent, since file -contents are normally not journaled. Notable exceptions are ext4 in data=journal mode, -ZFS and btrfs (unless nodatacow is used). - -Applications designed with crash-consistency in mind (most relational databases -like PostgreSQL, SQLite etc. but also for example Borg repositories) should always -be able to recover to a consistent state from a backup created with -crash-consistent snapshots (even on ext4 with data=writeback or XFS). - -Hypervisor snapshots capturing most of the VM's state can also be used for backups -and can be a better alternative to pure file system based snapshots of the VM's disk, -since no state is lost. Depending on the application this can be the easiest and most -reliable way to create application-consistent backups. - -Other applications may require a lot of work to reach application-consistency: -It's a broad and complex issue that cannot be explained in entirety here. - -Borg doesn't intend to address these issues due to their huge complexity -and platform/software dependency. Combining Borg with the mechanisms provided -by the platform (snapshots, hypervisor features) will be the best approach -to start tackling them. +What is the difference between a repo on an external hard drive vs. repo on a server? +------------------------------------------------------------------------------------- + +If Borg is running in client/server mode, the client uses SSH as a transport to +talk to the remote agent, which is another Borg process (Borg is installed on +the server, too). The Borg server is doing storage-related low-level repo +operations (get, put, commit, check, compact), while the Borg client does the +high-level stuff: deduplication, encryption, compression, dealing with +archives, backups, restores, etc., which reduces the amount of data that goes +over the network. + +When Borg is writing to a repo on a locally mounted remote file system, e.g. 
+SSHFS, the Borg client can only do file system operations and has no agent +running on the remote side, so *every* operation needs to go over the network, +which is slower. Can I backup from multiple servers into a single repository? ------------------------------------------------------------ -Yes, but in order for the deduplication used by |project_name| to work, it +Yes, this is *possible* from the technical standpoint, but it is +*not recommended* from the security perspective. |project_name| is +built upon a defined :ref:`attack_model` that cannot provide its +guarantees for multiple clients using the same repository. See +:ref:`borg_security_critique` for a detailed explanation. + +Also, in order for the deduplication used by |project_name| to work, it needs to keep a local cache containing checksums of all file chunks already stored in the repository. This cache is stored in ``~/.cache/borg/``. If |project_name| detects that a repository has been @@ -66,22 +48,77 @@ or deleting archives, which may make *simultaneous* backups fail. Can I copy or synchronize my repo to another location? ------------------------------------------------------ -Yes, you could just copy all the files. Make sure you do that while no -backup is running (use `borg with-lock ...`). So what you get here is this: +If you want to have redundant backup repositories (preferably at separate +locations), the recommended way to do that is like this: +- ``borg init repo1`` +- ``borg init repo2`` - client machine ---borg create---> repo1 -- repo1 ---copy---> repo2 +- client machine ---borg create---> repo2 + +This will create distinct repositories (separate repo ID, separate +keys) and nothing bad happening in repo1 will influence repo2. + +Some people decide against the above recommendation and create identical +copies of a repo (using some copy / sync / clone tool).
+ +While this might be better than having no redundancy at all, you have +to be very careful about how you do that and what you may / must not +do with the result (if you decide against our recommendation). -There is no special borg command to do the copying, just use cp or rsync if -you want to do that. +What you would get with this is: + +- client machine ---borg create---> repo +- repo ---copy/sync---> copy-of-repo + +There is no special borg command to do the copying, you could just +use any reliable tool that creates an identical copy (cp, rsync, rclone +might be options). But think about whether that is really what you want. If something goes -wrong in repo1, you will have the same issue in repo2 after the copy. +wrong in repo, you will have the same issue in copy-of-repo. -If you want to have 2 independent backups, it is better to do it like this: +Make sure you do the copy/sync while no backup is running, see +:ref:`borg_with-lock` about how to do that. -- client machine ---borg create---> repo1 -- client machine ---borg create---> repo2 +Also, you must not run borg against multiple instances of the same repo +(like repo and copy-of-repo) as that would create severe issues: + +- Data loss: they have the same repository ID, so the borg client will + think they are identical and e.g. use the same local cache for them + (which is an issue if they happen to be not the same). + See :issue:`4272` for an example. +- Encryption security issues if you would update repo and copy-of-repo + independently, due to AES counter reuse. + +There is also a similar encryption security issue for the disaster case: +If you lose repo and the borg client-side config/cache and you restore +the repo from an older copy-of-repo, you also run into AES counter reuse. 
+ +"this is either an attack or unsafe" warning +-------------------------------------------- + +About the warning: + + Cache, or information obtained from the security directory is newer than + repository - this is either an attack or unsafe (multiple repos with same ID) + +"unsafe": If not following the advice from the previous section, you can easily +run into this by yourself by restoring an older copy of your repository. + +"attack": maybe an attacker has replaced your repo by an older copy, trying to +trick you into AES counter reuse, trying to break your repo encryption. + +If you'd decide to ignore this and accept unsafe operation for this repository, +you could delete the manifest-timestamp and the local cache: + +:: + + borg config repo id # shows the REPO_ID + rm ~/.config/borg/REPO_ID/manifest-timestamp + borg delete --cache-only REPO + +This is an unsafe and unsupported way to use borg, you have been warned. Which file types, attributes, etc. are *not* preserved? ------------------------------------------------------- @@ -97,6 +134,9 @@ Which file types, attributes, etc. are *not* preserved? Archive extraction has optional support to extract all-zero chunks as holes in a sparse file. * Some filesystem specific attributes, like btrfs NOCOW, see :ref:`platforms`. + * For hardlinked symlinks, the hardlinking cannot be archived (and thus, + the hardlinking will not be done at extraction time). The symlinks will + be archived and extracted as non-hardlinked symlinks, see :issue:`2379`. Are there other known limitations? ---------------------------------- @@ -105,11 +145,16 @@ usually corresponding to tens or hundreds of millions of files/dirs. When trying to go beyond that limit, you will get a fatal IntegrityError exception telling that the (archive) object is too big. - An easy workaround is to create multiple archives with less items each. + An easy workaround is to create multiple archives with fewer items each.
See also the :ref:`archive_limitation` and :issue:`1452`. :ref:`borg_info` shows how large (relative to the maximum size) existing archives are. +- borg extract only supports restoring into an empty destination. After that, + the destination will exactly have the contents of the extracted archive. + If you extract into a non-empty destination, borg will (for example) not + remove files which are in the destination, but not in the archive. + See :issue:`4598` for a workaround and more details. .. _checkpoints_parts: @@ -129,10 +174,11 @@ completed is useful because it references all the transmitted chunks up to the checkpoint. This means that in case of an interruption, you only need to retransfer the data since the last checkpoint. -If a backup was interrupted, you do not need to do any special considerations, -just invoke ``borg create`` as you always do. You may use the same archive name -as in previous attempt or a different one (e.g. if you always include the current -datetime), it does not matter. +If a backup was interrupted, you normally do not need to do anything special, +just invoke ``borg create`` as you always do. If the repository is still locked, +you may need to run ``borg break-lock`` before the next backup. You may use the +same archive name as in previous attempt or a different one (e.g. if you always +include the current datetime), it does not matter. |project_name| always does full single-pass backups, so it will start again from the beginning - but it will be much faster, because some of the data was @@ -152,6 +198,11 @@ really desperate (e.g. if you have no completed backup of that file and you'ld rather get a partial file extracted than nothing). You do **not** want to give that option under any normal circumstances. +Note that checkpoints inside files are created only since version 1.1, make +sure you have an up-to-date version of borgbackup if you want to continue +instead of retransferring a huge file. 
In some cases, there is only an outdated +version shipped with your distribution (e.g. Debian). See :ref:`installation`. + How can I backup huge file(s) over a unstable connection? --------------------------------------------------------- @@ -159,6 +210,20 @@ This is not a problem anymore. For more details, see :ref:`checkpoints_parts`. +How can I switch append-only mode on and off? +----------------------------------------------------------------------------------------------------------------------------------- + +You could do that (via borg config REPO append_only 0/1), but using different +ssh keys and different entries in ``authorized_keys`` is much easier and also +maybe has less potential of things going wrong somehow. + + +My machine goes to sleep causing `Broken pipe` +---------------------------------------------- + +When backing up your data over the network, your machine should not go to sleep. +On macOS you can use `caffeinate` to avoid that. + How can I restore huge file(s) over an unstable connection? ----------------------------------------------------------- @@ -212,7 +291,7 @@ I get an IntegrityError or similar - what now? A single error does not necessarily indicate bad hardware or a Borg bug. All hardware exhibits a bit error rate (BER).
Hard drives are typically -specified as exhibiting less than one error every 12 to 120 TB +specified as exhibiting fewer than one error every 12 to 120 TB (one bit error in 10e14 to 10e15 bits). The specification is often called *unrecoverable read error rate* (URE rate). @@ -285,23 +364,168 @@ needs to be ascertained and fixed. issues. We recommend to first run without ``--repair`` to assess the situation. If the found issues and proposed repairs seem right, re-run "check" with ``--repair`` enabled. +How probable is it to get a hash collision problem? +--------------------------------------------------- + +If you noticed, there are some issues (:issue:`170` (**warning: hell**) and :issue:`4884`) +about the probability of a chunk having the same hash as another chunk, making the file +corrupted because it grabbed the wrong chunk. This is called the `Birthday Problem +`_. + +There is a lot of probability math in here, so I can give you my interpretation of +it, but it's honestly better that you read it yourself and draw your own +conclusions from that. + +Assuming that all your chunks have a size of :math:`2^{21}` bytes (approximately 2.1 MB) +and we have a "perfect" hash algorithm, we can think that the probability of collision +would be of :math:`p^2/2^{n+1}` then, using SHA-256 (:math:`n=256`) and for example +we have 1000 million chunks (:math:`p=10^9`) (1000 million chunks would be about 2100TB). +The probability would be around 0.0000000000000000000000000000000000000000000000000000000000043. + +A mass-murderer space rock happens about once every 30 million years on average. +This leads to a probability of such an event occurring in the next second to about :math:`10^{-15}`. +That's **45** orders of magnitude more probable than the SHA-256 collision. Briefly stated, +if you find SHA-256 collisions scary then your priorities are wrong. This example was grabbed from +`this SO answer `_, it's great honestly.
+ +Still, the real question is if Borg tries to not make this happen? + +Well... it used to not check anything but there was a feature added which saves the size +of the chunks too, so the size of the chunks is compared to the size that you got with the +hash and if the check says there is a mismatch it will raise an exception instead of corrupting +the file. This doesn't save us from everything but reduces the chances of corruption. +There are other ways of trying to escape this but it would affect performance so much that +it wouldn't be worth it and it would contradict Borg's design, so if you don't want this to +happen, simply don't use Borg. + +Why is the time elapsed in the archive stats different from wall clock time? +---------------------------------------------------------------------------- + +Borg needs to write the time elapsed into the archive metadata before finalizing +the archive, compacting the segments, and committing the repo & cache. This means +when Borg is run with e.g. the ``time`` command, the duration shown in the archive +stats may be shorter than the full time the command runs for. + +How do I configure different prune policies for different directories? +---------------------------------------------------------------------- + +Say you want to prune ``/var/log`` faster than the rest of +``/``. How do we implement that? The answer is to backup to different +archive *names* and then implement different prune policies for +different prefixes. For example, you could have a script that does:: + + borg create --exclude /var/log $REPOSITORY:main-$(date +%Y-%m-%d) / + borg create $REPOSITORY:logs-$(date +%Y-%m-%d) /var/log + +Then you would have two different prune calls with different policies:: + + borg prune --verbose --list -d 30 --prefix main- "$REPOSITORY" + borg prune --verbose --list -d 7 --prefix logs- "$REPOSITORY" + +This will keep 7 days of logs and 30 days of everything else. 
Borg 1.1 +also supports the ``--glob-archives`` parameter. + +How do I remove files from an existing backup? +---------------------------------------------- + +Say you now want to remove old logfiles because you changed your +backup policy as described above. The only way to do this is to use +the :ref:`borg_recreate` command to rewrite all archives with a +different ``--exclude`` pattern. See the examples in the +:ref:`borg_recreate` manpage for more information. + +Can I safely change the compression level or algorithm? +-------------------------------------------------------- + +The compression level and algorithm don't affect deduplication. Chunk ID hashes +are calculated *before* compression. New compression settings +will only be applied to new chunks, not existing chunks. So it's safe +to change them. + + Security ######## +.. _borg_security_critique: + +Isn't BorgBackup's AES-CTR crypto broken? +----------------------------------------- + +If a nonce (counter) value is reused, AES-CTR mode crypto is broken. + +To exploit the AES counter management issue, an attacker would need to have +access to the borg repository. + +By tampering with the repo, the attacker could bring the repo into a state so +that it reports a lower "highest used counter value" than the one that actually +was used. The client would usually notice that, because it rather trusts the +clientside stored "highest used counter value" than trusting the server. + +But there are situations, where this is simply not possible: + +- If clients A and B used the repo, the client A can only know its own highest + CTR value, but not the one produced by B. That is only known to (B and) the + server (the repo) and thus the client A needs to trust the server about the + value produced by B in that situation. You can't do much about this except + not having multiple clients per repo. + +- Even if there is only one client, if client-side information is completely + lost (e.g. 
due to disk defect), the client also needs to trust the value from + server side. You can avoid this by not continuing to write to the repository + after you have lost clientside borg information. + +.. _home_config_borg: + +How important is the $HOME/.config/borg directory? +-------------------------------------------------- + +The Borg config directory has content that you should take care of: + +``security`` subdirectory + Each directory here represents one Borg repository by its ID and contains the last known status. + If a repository's status is different from this information at the beginning of BorgBackup + operation, Borg outputs warning messages and asks for confirmation, so make sure you do not lose + or manipulate these files. However, apart from those warnings, a loss of these files can be + recovered. + +``keys`` subdirectory + In this directory all your repository keyfiles are stored. You MUST make sure to have an + independent backup of these keyfiles, otherwise you cannot access your backups anymore if you lose + them. You also MUST keep these files secret; everyone who gains access to your repository and has + the corresponding keyfile (and the key passphrase) can extract it. + +Make sure that only you have access to the Borg config directory. + +.. _cache_security: + +Do I need to take security precautions regarding the cache? +----------------------------------------------------------- + +The cache contains a lot of metadata information about the files in +your repositories and it is not encrypted. + +However, the assumption is that the cache is being stored on the very +same system which also contains the original files which are being +backed up. So someone with access to the cache files would also have +access to the original files anyway. + +The Internals section contains more details about :ref:`cache`. If you ever need to move the cache +to a different location, this can be achieved by using the appropriate :ref:`env_vars`.
+ How can I specify the encryption passphrase programmatically? ------------------------------------------------------------- There are several ways to specify a passphrase without human intervention: Setting ``BORG_PASSPHRASE`` - The passphrase can be specified using the ``BORG_PASSPHRASE`` enviroment variable. + The passphrase can be specified using the ``BORG_PASSPHRASE`` environment variable. This is often the simplest option, but can be insecure if the script that sets it is world-readable. .. _password_env: .. note:: Be careful how you set the environment; using the ``env`` command, a ``system()`` call or using inline shell scripts - (e.g. ``BORG_PASSPHRASE=hunter12 borg ...``) + (e.g. ``BORG_PASSPHRASE=hunter2 borg ...``) might expose the credentials in the process list directly and they will be readable to all users on a system. Using ``export`` in a shell script file should be safe, however, as @@ -314,22 +538,23 @@ Using ``BORG_PASSCOMMAND`` with a properly permissioned file directory and use permissions to keep anyone else from reading it. For example, first create a key:: - head -c 1024 /dev/urandom | base64 > ~/.borg-passphrase + head -c 32 /dev/urandom | base64 -w 0 > ~/.borg-passphrase chmod 400 ~/.borg-passphrase Then in an automated script one can put:: - export BORG_PASSCOMMAND="cat ~/.borg-passphrase" + export BORG_PASSCOMMAND="cat $HOME/.borg-passphrase" and Borg will automatically use that passphrase. Using keyfile-based encryption with a blank passphrase It is possible to encrypt your repository in ``keyfile`` mode instead of the default - ``repokey`` mode and use a blank passphrase for the key file. See :ref:`encrypted_repos` + ``repokey`` mode and use a blank passphrase for the key file (simply press Enter twice + when ``borg init`` asks for the password). See :ref:`encrypted_repos` for more details. 
-Using ``BORG_PASSCOMMAND`` with MacOS Keychain - MacOS has a native manager for secrets (such as passphrases) which is safer +Using ``BORG_PASSCOMMAND`` with macOS Keychain + macOS has a native manager for secrets (such as passphrases) which is safer than just using a file as it is encrypted at rest and unlocked manually (fortunately, the login keyring automatically unlocks when you login). With the built-in ``security`` command, you can access it from the command line, @@ -338,11 +563,11 @@ Using ``BORG_PASSCOMMAND`` with MacOS Keychain First generate a passphrase and use ``security`` to save it to your login (default) keychain:: - security add-generic-password -D secret -U -a $USER -s borg-passphrase -w $(head -c 1024 /dev/urandom | base64) + security add-generic-password -D secret -U -a $USER -s borg-passphrase -w $(head -c 32 /dev/urandom | base64 -w 0) In your backup script retrieve it in the ``BORG_PASSCOMMAND``:: - export BORG_PASSCOMMAND="security find-generic-password -a $USER -s borg-passphrase" + export BORG_PASSCOMMAND="security find-generic-password -a $USER -s borg-passphrase -w" Using ``BORG_PASSCOMMAND`` with GNOME Keyring GNOME also has a keyring daemon that can be used to store a Borg passphrase. @@ -356,7 +581,7 @@ Using ``BORG_PASSCOMMAND`` with GNOME Keyring Then add a secret to the login keyring:: - head -c 1024 /dev/urandom | base64 | secret-tool store borg-repository repo-name --label="Borg Passphrase" + head -c 32 /dev/urandom | base64 -w 0 | secret-tool store borg-repository repo-name --label="Borg Passphrase" If a dialog box pops up prompting you to pick a password for a new keychain, use your login password. If there is a checkbox for automatically unlocking on login, check it @@ -380,7 +605,7 @@ Using ``BORG_PASSCOMMAND`` with KWallet ``kwalletcli`` can be used to store and retrieve secrets. 
Ensure ``kwalletcli`` is installed, generate a passphrase, and store it in your "wallet":: - head -c 1024 /dev/urandom | base64 | kwalletcli -Pe borg-passphrase -f Passwords + head -c 32 /dev/urandom | base64 -w 0 | kwalletcli -Pe borg-passphrase -f Passwords Once the secret is saved, retrieve it in a backup script using ``BORG_PASSCOMMAND``:: @@ -416,10 +641,12 @@ C to delete all backups residing on S. These are your options to protect against that: - Do not allow to permanently delete data from the repo, see :ref:`append_only_mode`. -- Use a pull-mode setup using ``ssh -R``, see :issue:`900`. +- Use a pull-mode setup using ``ssh -R``, see :ref:`pull_backup` for more information. - Mount C's filesystem on another machine and then create a backup of it. - Do not give C filesystem-level access to S. +See :ref:`hosting_repositories` for a detailed protection guide. + How can I protect against a hacked backup server? ------------------------------------------------- @@ -460,6 +687,46 @@ Please disclose security issues responsibly. Common issues ############# +Why does Borg extract hang after some time? +------------------------------------------- + +When I do a ``borg extract``, after a while all activity stops, no cpu usage, +no downloads. + +This may happen when the SSH connection is stuck on server side. You can +configure SSH on client side to prevent this by sending keep-alive requests, +for example in ~/.ssh/config: + +:: + + Host borg.example.com + # Client kills connection after 3*30 seconds without server response: + ServerAliveInterval 30 + ServerAliveCountMax 3 + +You can also do the opposite and configure SSH on server side in +/etc/ssh/sshd_config, to make the server send keep-alive requests to the client: + +:: + + # Server kills connection after 3*30 seconds without client response: + ClientAliveInterval 30 + ClientAliveCountMax 3 + +How can I deal with my very unstable SSH connection? 
+---------------------------------------------------- + +If you have issues with lost connections during long-running borg commands, you +could try to work around: + +- Make partial extracts like ``borg extract REPO PATTERN`` to do multiple + smaller extraction runs that complete before your connection has issues. +- Try using ``borg mount REPO MOUNTPOINT`` and ``rsync -avH`` from + ``MOUNTPOINT`` to your desired extraction directory. If the connection breaks + down, just repeat that over and over again until rsync does not find anything + to do any more. Due to the way borg mount works, this might be less efficient + than borg extract for bigger volumes of data. + Why do I get "connection closed by remote" after a while? --------------------------------------------------------- @@ -492,14 +759,24 @@ connections and release the lock). The borg cache eats way too much disk space, what can I do? ----------------------------------------------------------- +This may especially happen if borg needs to rebuild the local "chunks" index - +either because it was removed, or because it was not coherent with the +repository state any more (e.g. because another borg instance changed the +repository). + +To optimize this rebuild process, borg caches per-archive information in the +``chunks.archive.d/`` directory. It won't help the first time it happens, but it +will make the subsequent rebuilds faster (because it needs to transfer less data +from the repository). While being faster, the cache needs quite some disk space, +which might be unwanted. + There is a temporary (but maybe long lived) hack to avoid using lots of disk space for chunks.archive.d (see :issue:`235` for details): :: # this assumes you are working with the same user as the backup. - # you can get the REPOID from the "config" file inside the repository. 
- cd ~/.cache/borg/ + cd ~/.cache/borg/$(borg config /path/to/repo id) rm -rf chunks.archive.d ; touch chunks.archive.d This deletes all the cached archive chunk indexes and replaces the directory @@ -546,6 +823,67 @@ If you run into that, try this: the parent directory (or even everything) - mount the repo using FUSE and use some file manager +.. _expected_performance: + +What's the expected backup performance? +--------------------------------------- + +A first backup will usually be somehow "slow" because there is a lot of data +to process. Performance here depends on a lot of factors, so it is hard to +give specific numbers. + +Subsequent backups are usually very fast if most files are unchanged and only +a few are new or modified. The high performance on unchanged files primarily depends +only on a few factors (like fs recursion + metadata reading performance and the +files cache working as expected) and much less on other factors. + +E.g., for this setup: + +- server grade machine (4C/8T 2013 Xeon, 64GB RAM, 2x good 7200RPM disks) +- local zfs filesystem (mirrored) containing the backup source data +- repository is remote (does not matter much for unchanged files) +- backup job runs while machine is otherwise idle + +The observed performance is that |project_name| can process about +**1 million unchanged files (and a few small changed ones) in 4 minutes!** + +If you are seeing much less than that in similar circumstances, read the next +few FAQ entries below. + +.. _slow_backup: + +Why is backup slow for me? +-------------------------- + +So, if you feel your |project_name| backup is too slow somehow, you should find out why. + +The usual way to approach this is to add ``--list --filter=AME --stats`` to your +``borg create`` call to produce more log output, including a file list (with file status +characters) and also some statistics at the end of the backup. 
+ +Then you do the backup and look at the log output: + +- stats: Do you really have little changes or are there more changes than you thought? + In the stats you can see the overall volume of changed data, which needed to be + added to the repo. If that is a lot, that can be the reason why it is slow. +- ``A`` status ("added") in the file list: + If you see that often, you have a lot of new files (files that |project_name| did not find + in the files cache). If you think there is something wrong with that (the file was there + already in the previous backup), please read the FAQ entries below. +- ``M`` status ("modified") in the file list: + If you see that often, |project_name| thinks that a lot of your files might be modified + (|project_name| found them in the files cache, but the metadata read from the filesystem did + not match the metadata stored in the files cache). + In such a case, |project_name| will need to process the files' contents completely, which is + much slower than processing unmodified files (|project_name| does not read their contents!). + The metadata values used in this comparison are determined by the ``--files-cache`` option + and could be e.g. size, ctime and inode number (see the ``borg create`` docs for more + details and potential issues). + You can use the ``stat`` command on files to manually look at fs metadata to debug if + there is any unexpected change triggering the ``M`` status. + +See also the next few FAQ entries for more details. + .. _a_status_oddity: I am seeing 'A' (added) status for an unchanged file!? @@ -553,15 +891,15 @@ I am seeing 'A' (added) status for an unchanged file!? The files cache is used to determine whether |project_name| already "knows" / has backed up a file and if so, to skip the file from -chunking. It does intentionally *not* contain files that have a modification -time (mtime) same as the newest mtime in the created archive. +chunking. 
It intentionally *excludes* files that have a timestamp +which is the same as the newest timestamp in the created archive. So, if you see an 'A' status for unchanged file(s), they are likely the files -with the most recent mtime in that archive. +with the most recent timestamp in that archive. This is expected: it is to avoid data loss with files that are backed up from a snapshot and that are immediately changed after the snapshot (but within -mtime granularity time, so the mtime would not change). Without the code that +timestamp granularity time, so the timestamp would not change). Without the code that removes these files from the files cache, the change that happened right after the snapshot would not be contained in the next backup as |project_name| would think the file is unchanged. @@ -572,7 +910,7 @@ mentioned rare condition), it will just re-use them as usual and not store new data chunks. If you want to avoid unnecessary chunking, just create or touch a small or -empty file in your backup source file set (so that one has the latest mtime, +empty file in your backup source file set (so that one has the latest timestamp, not your 50GB VM disk image) and, if you do snapshots, do the snapshot after that. @@ -580,15 +918,25 @@ Since only the files cache is used in the display of files status, those files are reported as being added when, really, chunks are already used. +By default, ctime (change time) is used for the timestamps to have a rather +safe change detection (see also the --files-cache option). + +Furthermore, pathnames recorded in files cache are always absolute, even if you specify +source directories with relative pathname. 
If relative pathnames are stable, but absolute are +not (for example if you mount a filesystem without stable mount points for each backup or +if you are running the backup from a filesystem snapshot whose name is not stable), borg +will assume that files are different and will report them as 'added', even though no new +chunks will be actually recorded for them. To avoid this, you could bind mount your source +directory in a directory with the stable path. .. _always_chunking: It always chunks all my files, even unchanged ones! --------------------------------------------------- -|project_name| maintains a files cache where it remembers the mtime, size and -inode of files. When |project_name| does a new backup and starts processing a -file, it first looks whether the file has changed (compared to the values +|project_name| maintains a files cache where it remembers the timestamp, size +and inode of files. When |project_name| does a new backup and starts processing +a file, it first looks whether the file has changed (compared to the values stored in the files cache). If the values are the same, the file is assumed unchanged and thus its contents won't get chunked (again). @@ -609,7 +957,8 @@ it would be much faster. Another possible reason is that files don't always have the same path, for example if you mount a filesystem without stable mount points for each backup or if you are running the backup from a filesystem snapshot whose name is not stable. If the directory where you mount a filesystem is different every time, -|project_name| assume they are different files. +|project_name| assumes they are different files. This is true even if you backup these files with relative pathnames - borg uses full +pathnames in files cache regardless. Is there a way to limit bandwidth with |project_name|? @@ -622,25 +971,48 @@ There is no built-in way to limit *download* (i.e. 
:ref:`borg_extract`) bandwidth, but limiting download bandwidth can be accomplished with pipeviewer_: -Create a wrapper script: /usr/local/bin/pv-wrapper :: +Create a wrapper script: /usr/local/bin/pv-wrapper + +:: - #!/bin/bash + #!/bin/sh ## -q, --quiet do not output any transfer information at all ## -L, --rate-limit RATE limit transfer to RATE bytes per second - export RATE=307200 + RATE=307200 pv -q -L $RATE | "$@" -Add BORG_RSH environment variable to use pipeviewer wrapper script with ssh. :: +Add BORG_RSH environment variable to use pipeviewer wrapper script with ssh. + +:: export BORG_RSH='/usr/local/bin/pv-wrapper ssh' -Now |project_name| will be bandwidth limited. Nice thing about pv is that you can change rate-limit on the fly: :: +Now |project_name| will be bandwidth limited. Nice thing about pv is that you can change rate-limit on the fly: + +:: pv -R $(pidof pv) -L 102400 .. _pipeviewer: http://www.ivarch.com/programs/pv.shtml +How can I avoid unwanted base directories getting stored into archives? +----------------------------------------------------------------------- + +Possible use cases: + +- Another file system is mounted and you want to backup it with original paths. +- You have created a BTRFS snapshot in a ``/.snapshots`` directory for backup. + +To achieve this, run ``borg create`` within the mountpoint/snapshot directory: + +:: + + # Example: Some file system mounted in /mnt/rootfs. + cd /mnt/rootfs + borg create /path/to/repo::rootfs_backup . + + I am having troubles with some network/FUSE/special filesystem, why? -------------------------------------------------------------------- @@ -682,6 +1054,54 @@ the corruption is caused by a one time event such as a power outage, running `borg check --repair` will fix most problems. +Why isn't there more progress / ETA information displayed? 
+----------------------------------------------------------
+
+Some borg runs take quite a while, so it would be nice to see a progress display,
+maybe even including an ETA (expected time of "arrival" [here rather "completion"]).
+
+For some functionality, this can be done: if the total amount of work is more or
+less known, we can display progress. So check if there is a ``--progress`` option.
+
+But sometimes, the total amount is unknown (e.g. for ``borg create`` we just do
+a single pass over the filesystem, so we do not know the total file count or data
+volume before reaching the end). Adding another pass just to determine that would
+take additional time and could be incorrect, if the filesystem is changing.
+
+Even if the fs does not change and we knew count and size of all files, we still
+could not compute the ``borg create`` ETA as we do not know the amount of changed
+chunks, nor how the bandwidth of source and destination or system performance might
+fluctuate.
+
+You see, trying to display an ETA would be futile. The borg developers prefer to
+rather not implement progress / ETA display than make futile attempts.
+
+See also: https://xkcd.com/612/
+
+
+Why am I getting 'Operation not permitted' errors when backing up on sshfs?
+---------------------------------------------------------------------------
+
+By default, ``sshfs`` is not entirely POSIX-compliant when renaming files due to
+a technicality in the SFTP protocol. Fortunately, it also provides a workaround_
+to make it behave correctly::
+
+    sshfs -o workaround=rename user@host:dir /mnt/dir
+
+.. _workaround: https://unix.stackexchange.com/a/123236
+
+
+Can I disable checking for free disk space?
+-------------------------------------------
+
+In some cases, the free disk space of the target volume is reported incorrectly.
+This can happen for CIFS or FUSE shares. 
If you are sure that your target volume +will always have enough disk space, you can use the following workaround to disable +checking for free disk space:: + + borg config -- $REPO_LOCATION additional_free_space -2T + + Miscellaneous ############# @@ -751,7 +1171,7 @@ Here's a (incomplete) list of some major changes: * more open, faster paced development (see `issue #1 `_) * less chunk management overhead (less memory and disk usage for chunks index) * faster remote cache resync (useful when backing up multiple machines into same repo) -* compression: no, lz4, zlib or lzma compression, adjustable compression levels +* compression: no, lz4, zstd, zlib or lzma compression, adjustable compression levels * repokey replaces problematic passphrase mode (you can't change the passphrase nor the pbkdf2 iteration count in "passphrase" mode) * simple sparse file support, great for virtual machine disk files * can read special files (e.g. block devices) or from stdin, write to stdout @@ -759,7 +1179,7 @@ Here's a (incomplete) list of some major changes: * uses fadvise to not spoil / blow up the fs cache * better error messages / exception handling * better logging, screen output, progress indication -* tested on misc. Linux systems, 32 and 64bit, FreeBSD, OpenBSD, NetBSD, MacOS +* tested on misc. Linux systems, 32 and 64bit, FreeBSD, OpenBSD, NetBSD, macOS Please read the :ref:`changelog` (or ``docs/changes.rst`` in the source distribution) for more information. @@ -779,7 +1199,7 @@ There are some caveats: - If the repository is in "keyfile" encryption mode, the keyfile must exist locally or it must be manually moved after performing the upgrade: - 1. Locate the repository ID, contained in the ``config`` file in the repository. + 1. Get the repository ID with ``borg config /path/to/repo id``. 2. Locate the attic key file at ``~/.attic/keys/``. The correct key for the repository starts with the line ``ATTIC_KEY ``. 3. 
Copy the attic key file to ``~/.config/borg/keys/`` @@ -803,8 +1223,7 @@ The default in Borg is lz4, which is fast enough to not use significant CPU time in most cases, but can only achieve modest compression. It still compresses easily compressed data fairly well. -zlib compression with all levels (1-9) as well as LZMA (1-6) are available -as well, for cases where they are worth it. +Borg also offers zstd, zlib and lzma compression, choose wisely. Which choice is the best option depends on a number of factors, like bandwidth to the repository, how well the data compresses, available CPU diff --git a/docs/global.rst.inc b/docs/global.rst.inc index 88c659a36e..c9589f1345 100644 --- a/docs/global.rst.inc +++ b/docs/global.rst.inc @@ -15,6 +15,7 @@ .. _libacl: https://savannah.nongnu.org/projects/acl/ .. _libattr: https://savannah.nongnu.org/projects/attr/ .. _liblz4: https://github.com/Cyan4973/lz4 +.. _libzstd: https://github.com/facebook/zstd .. _libb2: https://github.com/BLAKE2/libb2 .. _OpenSSL: https://www.openssl.org/ .. _`Python 3`: https://www.python.org/ diff --git a/docs/installation.rst b/docs/installation.rst index de7a582aa8..ad2c405227 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -41,44 +41,62 @@ package which can be installed with the package manager. 
============ ============================================= ======= Distribution Source Command ============ ============================================= ======= +Alpine Linux `Alpine repository`_ ``apk add borgbackup`` Arch Linux `[community]`_ ``pacman -S borg`` Debian `Debian packages`_ ``apt install borgbackup`` Gentoo `ebuild`_ ``emerge borgbackup`` GNU Guix `GNU Guix`_ ``guix package --install borg`` Fedora/RHEL `Fedora official repository`_ ``dnf install borgbackup`` FreeBSD `FreeBSD ports`_ ``cd /usr/ports/archivers/py-borgbackup && make install clean`` +macOS `Homebrew`_ | ``brew install borgbackup`` (official formula, **no** FUSE support) + | **or** + | ``brew install --cask macfuse`` (`private Tap`_, FUSE support) + | ``brew install borgbackup/tap/borgbackup-fuse`` Mageia `cauldron`_ ``urpmi borgbackup`` NetBSD `pkgsrc`_ ``pkg_add py-borgbackup`` NixOS `.nix file`_ N/A OpenBSD `OpenBSD ports`_ ``pkg_add borgbackup`` OpenIndiana `OpenIndiana hipster repository`_ ``pkg install borg`` openSUSE `openSUSE official repository`_ ``zypper in borgbackup`` -OS X `Brew cask`_ ``brew cask install borgbackup`` Raspbian `Raspbian testing`_ ``apt install borgbackup`` Ubuntu `Ubuntu packages`_, `Ubuntu PPA`_ ``apt install borgbackup`` ============ ============================================= ======= +.. _Alpine repository: https://pkgs.alpinelinux.org/packages?name=borgbackup .. _[community]: https://www.archlinux.org/packages/?name=borg .. _Debian packages: https://packages.debian.org/search?keywords=borgbackup&searchon=names&exact=1&suite=all§ion=all .. _Fedora official repository: https://apps.fedoraproject.org/packages/borgbackup -.. _FreeBSD ports: http://www.freshports.org/archivers/py-borgbackup/ +.. _FreeBSD ports: https://www.freshports.org/archivers/py-borgbackup/ .. _ebuild: https://packages.gentoo.org/packages/app-backup/borgbackup .. _GNU Guix: https://www.gnu.org/software/guix/package-list.html#borg .. _pkgsrc: http://pkgsrc.se/sysutils/py-borgbackup .. 
_cauldron: http://madb.mageia.org/package/show/application/0/release/cauldron/name/borgbackup .. _.nix file: https://github.com/NixOS/nixpkgs/blob/master/pkgs/tools/backup/borg/default.nix -.. _OpenBSD ports: http://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/sysutils/borgbackup/ -.. _OpenIndiana hipster repository: http://pkg.openindiana.org/hipster/en/search.shtml?token=borg&action=Search -.. _openSUSE official repository: http://software.opensuse.org/package/borgbackup -.. _Brew cask: https://caskroom.github.io/ -.. _Raspbian testing: http://archive.raspbian.org/raspbian/pool/main/b/borgbackup/ -.. _Ubuntu packages: http://packages.ubuntu.com/xenial/borgbackup +.. _OpenBSD ports: https://cvsweb.openbsd.org/cgi-bin/cvsweb/ports/sysutils/borgbackup/ +.. _OpenIndiana hipster repository: https://pkg.openindiana.org/hipster/en/search.shtml?token=borg&action=Search +.. _openSUSE official repository: https://software.opensuse.org/package/borgbackup +.. _Homebrew: https://formulae.brew.sh/formula/borgbackup +.. _private Tap: https://github.com/borgbackup/homebrew-tap +.. _Raspbian testing: https://archive.raspbian.org/raspbian/pool/main/b/borgbackup/ +.. _Ubuntu packages: https://packages.ubuntu.com/xenial/borgbackup .. _Ubuntu PPA: https://launchpad.net/~costamagnagianfranco/+archive/ubuntu/borgbackup Please ask package maintainers to build a package or, if you can package / submit it yourself, please help us with that! See :issue:`105` on github to followup on packaging efforts. +**Current status of package in the repositories** + +.. start-badges + +|Packaging status| + +.. |Packaging status| image:: https://repology.org/badge/vertical-allrepos/borgbackup.svg + :alt: Packaging status + :target: https://repology.org/project/borgbackup/versions + +.. end-badges + .. _pyinstaller-binary: Standalone Binary @@ -87,13 +105,17 @@ Standalone Binary .. note:: Releases are signed with an OpenPGP key, see :ref:`security-contact` for more instructions. 
-|project_name| binaries (generated with `pyinstaller`_) are available -on the releases_ page for the following platforms: +|project_name| x86/x64 amd/intel compatible binaries (generated with `pyinstaller`_) +are available on the releases_ page for the following platforms: -* **Linux**: glibc >= 2.13 (ok for most supported Linux releases). +* **Linux**: glibc >= 2.19 (ok for most supported Linux releases). Older glibc releases are untested and may not work. -* **Mac OS X**: 10.10 (does not work with older OS X releases) -* **FreeBSD**: 10.2 (unknown whether it works for older releases) +* **MacOS**: 10.10 or newer (To avoid signing issues download the file via + command line **or** remove the ``quarantine`` attribute after downloading: + ``$ xattr -dr com.apple.quarantine borg-macosx64.tgz``) +* **FreeBSD**: 10.3 (unknown whether it works for older releases) + +ARM binaries are built by Johann Bauer, see: https://borg.bauerj.eu/ To install such a binary, just drop it into a directory in your ``PATH``, make borg readable and executable for its users and then you can run ``borg``:: @@ -136,16 +158,21 @@ Dependencies To install |project_name| from a source package (including pip), you have to install the following dependencies first: -* `Python 3`_ >= 3.4.0, plus development headers. Even though Python 3 is not +* `Python 3`_ >= 3.5.0, plus development headers. Even though Python 3 is not the default Python version on most systems, it is usually available as an optional install. * OpenSSL_ >= 1.0.0, plus development headers. * libacl_ (which depends on libattr_), both plus development headers. -* liblz4_, plus development headers. 
+* We have bundled code of the following packages, but borg by default (see + setup.py if you want to change that) prefers a shared library if it can + be found on the system (lib + dev headers) at build time: + + - liblz4_ >= 1.7.0 (r129) + - libzstd_ >= 1.3.0 + - libb2_ * some Python dependencies, pip will automatically install them for you * optionally, the llfuse_ Python package is required if you wish to mount an archive as a FUSE filesystem. See setup.py about the version requirements. -* optionally libb2_. If it is not found a bundled implementation is used instead. If you have troubles finding the right package names, have a look at the distribution specific sections below or the Vagrantfile in the git repository, @@ -168,7 +195,6 @@ Install the dependencies with development headers:: sudo apt-get install python3 python3-dev python3-pip python-virtualenv \ libssl-dev openssl \ libacl1-dev libacl1 \ - liblz4-dev liblz4-1 \ build-essential sudo apt-get install libfuse-dev fuse pkg-config # optional, for FUSE support @@ -184,7 +210,6 @@ Install the dependencies with development headers:: sudo dnf install python3 python3-devel python3-pip python3-virtualenv sudo dnf install openssl-devel openssl sudo dnf install libacl-devel libacl - sudo dnf install lz4-devel sudo dnf install gcc gcc-c++ sudo dnf install redhat-rpm-config # not needed in Korora sudo dnf install fuse-devel fuse pkgconfig # optional, for FUSE support @@ -199,26 +224,37 @@ Install the dependencies automatically using zypper:: Alternatively, you can enumerate all build dependencies in the command line:: sudo zypper install python3 python3-devel \ - libacl-devel liblz4-devel openssl-devel \ + libacl-devel openssl-devel \ python3-Cython python3-Sphinx python3-msgpack-python \ python3-pytest python3-setuptools python3-setuptools_scm \ python3-sphinx_rtd_theme python3-llfuse gcc gcc-c++ -Mac OS X -++++++++ +macOS ++++++ -Assuming you have installed homebrew_, the following steps will install all the 
-dependencies:: +When installing via Homebrew_, dependencies are installed automatically. To install +dependencies manually:: - brew install python3 lz4 openssl - brew install pkg-config # optional, for FUSE support + brew install python3 openssl zstd lz4 xxhash + brew install pkg-config pip3 install virtualenv For FUSE support to mount the backup archives, you need at least version 3.0 of -FUSE for OS X, which is available as a pre-release_. +macFUSE, which is available via `Github +`__, or Homebrew:: + + brew install --cask macfuse + +For OS X Catalina and later, be aware that you must authorize full disk access. +It is no longer sufficient to run borg backups as root. If you have not yet +granted full disk access, and you run Borg backup from cron, you will see +messages such as:: -.. _pre-release: https://github.com/osxfuse/osxfuse/releases + /Users/you/Pictures/Photos Library.photoslibrary: scandir: [Errno 1] Operation not permitted: +To fix this problem, you should grant full disk access to cron, and to your +Terminal application. More information `can be found here +`__. FreeBSD ++++++++ @@ -227,9 +263,9 @@ and commands to make FUSE work for using the mount command. :: - pkg install -y python3 openssl liblz4 fusefs-libs pkgconf + pkg install -y python3 openssl fusefs-libs pkgconf pkg install -y git - python3.4 -m ensurepip # to install pip for Python3 + python3 -m ensurepip # to install pip for Python3 To use the mount command: echo 'fuse_load="YES"' >> /boot/loader.conf echo 'vfs.usermount=1' >> /etc/sysctl.conf @@ -250,20 +286,18 @@ Cygwin ++++++ .. note:: - Running under Cygwin is experimental and has only been tested with Cygwin - (x86-64) v2.5.2. Remote repositories are known broken, local repositories should work. + Running under Cygwin is experimental and has not been tested much yet. 
Use the Cygwin installer to install the dependencies:: python3 python3-devel python3-setuptools binutils gcc-g++ libopenssl openssl-devel - liblz4_1 liblz4-devel git make openssh You can then install ``pip`` and ``virtualenv``:: - easy_install-3.4 pip + easy_install-3.7 pip pip install virtualenv @@ -287,6 +321,8 @@ This will use ``pip`` to install the latest release from PyPi:: virtualenv --python=python3 borg-env source borg-env/bin/activate + # might be required if your tools are outdated + pip install -U pip setuptools wheel # install Borg + Python dependencies into virtualenv pip install borgbackup # or alternatively (if you want FUSE support): @@ -303,7 +339,9 @@ Using git ~~~~~~~~~ This uses latest, unreleased development code from git. -While we try not to break master, there are no guarantees on anything. :: +While we try not to break master, there are no guarantees on anything. + +:: # get borg from github git clone https://github.com/borgbackup/borg.git @@ -318,8 +356,8 @@ While we try not to break master, there are no guarantees on anything. :: pip install -r requirements.d/fuse.txt # optional, for FUSE support pip install -e . # in-place editable mode - # optional: run all the tests, on all supported Python versions + # optional: run all the tests, on all installed Python versions # requires fakeroot, available through your package manager - fakeroot -u tox + fakeroot -u tox --skip-missing-interpreters .. note:: As a developer or power user, you always want to use a virtual environment. diff --git a/docs/internals/data-structures.rst b/docs/internals/data-structures.rst index dd7eaefd2f..bb774a0536 100644 --- a/docs/internals/data-structures.rst +++ b/docs/internals/data-structures.rst @@ -55,7 +55,7 @@ time for the purposes of the log. 
Config file ~~~~~~~~~~~ -Each repository has a ``config`` file which which is a ``INI``-style file +Each repository has a ``config`` file which is a ``INI``-style file and looks like this:: [repository] @@ -535,7 +535,7 @@ ACLs/xattrs), the limit will be ~32 million files/directories per archive. If one tries to create an archive object bigger than MAX_OBJECT_SIZE, a fatal IntegrityError will be raised. -A workaround is to create multiple archives with less items each, see +A workaround is to create multiple archives with fewer items each, see also :issue:`1452`. .. _item: @@ -577,7 +577,7 @@ Chunks The |project_name| chunker uses a rolling hash computed by the Buzhash_ algorithm. It triggers (chunks) when the last HASH_MASK_BITS bits of the hash are zero, -producing chunks of 2^HASH_MASK_BITS Bytes on average. +producing chunks with a target size of 2^HASH_MASK_BITS Bytes. Buzhash is **only** used for cutting the chunks at places defined by the content, the buzhash value is **not** used as the deduplication criteria (we @@ -589,13 +589,13 @@ can be used to tune the chunker parameters, the default is: - CHUNK_MIN_EXP = 19 (minimum chunk size = 2^19 B = 512 kiB) - CHUNK_MAX_EXP = 23 (maximum chunk size = 2^23 B = 8 MiB) -- HASH_MASK_BITS = 21 (statistical medium chunk size ~= 2^21 B = 2 MiB) +- HASH_MASK_BITS = 21 (target chunk size ~= 2^21 B = 2 MiB) - HASH_WINDOW_SIZE = 4095 [B] (`0xFFF`) The buzhash table is altered by XORing it with a seed randomly generated once -for the archive, and stored encrypted in the keyfile. This is to prevent chunk -size based fingerprinting attacks on your encrypted repo contents (to guess -what files you have based on a specific set of chunk sizes). +for the repository, and stored encrypted in the keyfile. This is to prevent +chunk size based fingerprinting attacks on your encrypted repo contents (to +guess what files you have based on a specific set of chunk sizes). For some more general usage hints see also ``--chunker-params``. 
@@ -706,7 +706,7 @@ be estimated like that:: All units are Bytes. It is assuming every chunk is referenced exactly once (if you have a lot of -duplicate chunks, you will have less chunks than estimated above). +duplicate chunks, you will have fewer chunks than estimated above). It is also assuming that typical chunk size is 2^HASH_MASK_BITS (if you have a lot of files smaller than this statistical medium chunk size, you will have @@ -735,7 +735,7 @@ b) with ``create --chunker-params 19,23,21,4095`` (default): mem_usage = 0.31GiB -.. note:: There is also the ``--no-files-cache`` option to switch off the files cache. +.. note:: There is also the ``--files-cache=disabled`` option to disable the files cache. You'll save some memory, but it will need to read / chunk all the files as it can not skip unmodified files then. @@ -910,15 +910,16 @@ Compression - none (no compression, pass through data 1:1) - lz4 (low compression, but super fast) +- zstd (level 1-22 offering a wide range: level 1 is lower compression and high + speed, level 22 is higher compression and lower speed) - since borg 1.1.4 - zlib (level 0-9, level 0 is no compression [but still adding zlib overhead], level 1 is low, level 9 is high compression) - lzma (level 0-9, level 0 is low, level 9 is high compression). -Speed: none > lz4 > zlib > lzma -Compression: lzma > zlib > lz4 > none +Speed: none > lz4 > zlib > lzma, lz4 > zstd +Compression: lzma > zlib > lz4 > none, zstd > lz4 -Be careful, higher zlib and especially lzma compression levels might take a -lot of resources (CPU and memory). +Be careful, higher compression levels might use a lot of resources (CPU/memory). The overall speed of course also depends on the speed of your target storage. 
If that is slow, using a higher compression level might yield better overall diff --git a/docs/internals/frontends.rst b/docs/internals/frontends.rst index c41d427ebe..0a39c28a0f 100644 --- a/docs/internals/frontends.rst +++ b/docs/internals/frontends.rst @@ -1,5 +1,5 @@ .. include:: ../global.rst.inc -.. highlight:: json +.. highlight:: none .. _json_output: @@ -11,7 +11,23 @@ but does mean that there are no release-to-release guarantees on what you might even for point releases (1.1.x), and there is no documentation beyond the code and the internals documents. Borg does on the other hand provide an API on a command-line level. In other words, a frontend should to -(for example) create a backup archive just invoke :ref:`borg_create`. +(for example) create a backup archive just invoke :ref:`borg_create`, give commandline parameters/options +as needed and parse JSON output from borg. + +Important: JSON output is expected to be UTF-8, but currently borg depends on the locale being configured +for that (must be a UTF-8 locale and *not* "C" or "ascii"), so that Python will choose to encode to UTF-8. +The same applies to any inputs read by borg, they are expected to be UTF-8 encoded also. + +We consider this a bug (see :issue:`2273`) and might fix it later, so borg will use UTF-8 independent of +the locale. + +On POSIX systems, you can usually set environment vars to choose a UTF-8 locale: + +:: + + export LANG=en_US.UTF-8 + export LC_CTYPE=en_US.UTF-8 + Logging ------- @@ -20,6 +36,10 @@ Especially for graphical frontends it is important to be able to convey and refo in meaningful ways. The ``--log-json`` option turns the stderr stream of Borg into a stream of JSON lines, where each line is a JSON object. The *type* key of the object determines its other contents. +.. warning:: JSON logging requires successful argument parsing. 
Even with ``--log-json`` specified, a + parsing error will be printed in plain text, because logging set-up happens after all arguments are + parsed. + Since JSON can only encode text, any string representing a file system path may miss non-text parts. The following types are in use. Progress information is governed by the usual rules for progress information, @@ -105,6 +125,7 @@ log_message See Prompts_ for the types used by prompts. .. rubric:: Examples (reformatted, each object would be on exactly one line) +.. highlight:: json :ref:`borg_extract` progress:: @@ -155,6 +176,7 @@ environment variable that can be used to override the prompt. It is the same for messages pertaining to the same prompt. .. rubric:: Examples (reformatted, each object would be on exactly one line) +.. highlight:: none Providing an invalid answer:: @@ -209,10 +231,16 @@ Standard output *stdout* is different and more command-dependent than logging. Commands like :ref:`borg_info`, :ref:`borg_create` and :ref:`borg_list` implement a ``--json`` option which turns their regular output into a single JSON object. -Dates are formatted according to ISO-8601 with the strftime format string '%a, %Y-%m-%d %H:%M:%S', -e.g. *Sat, 2016-02-25 23:50:06*. +Some commands, like :ref:`borg_list` and :ref:`borg_diff`, can produce *a lot* of JSON. Since many JSON implementations +don't support a streaming mode of operation, which is pretty much required to deal with this amount of JSON, these +commands implement a ``--json-lines`` option which generates output in the `JSON lines `_ format, +which is simply a number of JSON objects separated by new lines. -The root object at least contains a *repository* key with an object containing: +Dates are formatted according to ISO 8601 in local time. No explicit time zone is specified *at this time* +(subject to change). The equivalent strftime format string is '%Y-%m-%dT%H:%M:%S.%f', +e.g. ``2017-08-07T12:27:20.123456``. 
+ +The root object of '--json' output will contain at least a *repository* key with an object containing: id The ID of the repository, normally 64 hex characters @@ -248,6 +276,8 @@ stats unique_csize Compressed and encrypted size of all chunks +.. highlight: json + Example *borg info* output:: { @@ -267,7 +297,7 @@ Example *borg info* output:: }, "repository": { "id": "0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", - "last_modified": "Mon, 2017-02-27 21:21:58", + "last_modified": "2017-08-07T12:27:20.789123", "location": "/home/user/testrepo" }, "security_dir": "/home/user/.config/borg/security/0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", @@ -311,6 +341,8 @@ command_line Array of strings of the command line that created the archive The note about paths from above applies here as well. +chunker_params + The chunker parameters the archive has been created with. :ref:`borg_info` further has: @@ -321,6 +353,16 @@ username comment Archive comment, if any +Some keys/values are more expensive to compute than others (e.g. because it requires opening the archive, +not just the manifest). To optimize for speed, `borg list repo` does not determine these values except +when they are requested. 
The `--format` option is used for that (for normal mode as well as for `--json` +mode), so, to have the comment included in the json output, you will need: + +:: + + borg list repo --format "{name}{comment}" --json` + + Example of a simple archive listing (``borg list --last 1 --json``):: { @@ -328,7 +370,7 @@ Example of a simple archive listing (``borg list --last 1 --json``):: { "id": "80cd07219ad725b3c5f665c1dcf119435c4dee1647a560ecac30f8d40221a46a", "name": "host-system-backup-2017-02-27", - "start": "Mon, 2017-02-27 21:21:52" + "start": "2017-08-07T12:27:20.789123" } ], "encryption": { @@ -336,7 +378,7 @@ Example of a simple archive listing (``borg list --last 1 --json``):: }, "repository": { "id": "0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", - "last_modified": "Mon, 2017-02-27 21:21:58", + "last_modified": "2017-08-07T12:27:20.789123", "location": "/home/user/repository" } } @@ -346,6 +388,12 @@ The same archive with more information (``borg info --last 1 --json``):: { "archives": [ { + "chunker_params": [ + 13, + 23, + 16, + 4095 + ], "command_line": [ "/home/user/.local/bin/borg", "create", @@ -354,14 +402,14 @@ The same archive with more information (``borg info --last 1 --json``):: ], "comment": "", "duration": 5.641542, - "end": "Mon, 2017-02-27 21:21:58", + "end": "2017-02-27T12:27:20.789123", "hostname": "host", "id": "80cd07219ad725b3c5f665c1dcf119435c4dee1647a560ecac30f8d40221a46a", "limits": { "max_archive_size": 0.0001330855110409714 }, "name": "host-system-backup-2017-02-27", - "start": "Mon, 2017-02-27 21:21:52", + "start": "2017-02-27T12:27:20.789123", "stats": { "compressed_size": 1880961894, "deduplicated_size": 2791, @@ -387,7 +435,7 @@ The same archive with more information (``borg info --last 1 --json``):: }, "repository": { "id": "0cbe6166b46627fd26b97f8831e2ca97584280a46714ef84d2b668daf8271a23", - "last_modified": "Mon, 2017-02-27 21:21:58", + "last_modified": "2017-08-07T12:27:20.789123", "location": 
"/home/user/repository" } } @@ -395,18 +443,85 @@ The same archive with more information (``borg info --last 1 --json``):: File listings +++++++++++++ -Listing the contents of an archive can produce *a lot* of JSON. Since many JSON implementations -don't support a streaming mode of operation, which is pretty much required to deal with this amount of -JSON, output is generated in the `JSON lines `_ format, which is simply -a number of JSON objects separated by new lines. - -Each item (file, directory, ...) is described by one object in the :ref:`borg_list` output. +Each archive item (file, directory, ...) is described by one object in the :ref:`borg_list` output. Refer to the *borg list* documentation for the available keys and their meaning. Example (excerpt) of ``borg list --json-lines``:: - {"type": "d", "mode": "drwxr-xr-x", "user": "user", "group": "user", "uid": 1000, "gid": 1000, "path": "linux", "healthy": true, "source": "", "linktarget": "", "flags": null, "isomtime": "Sat, 2016-05-07 19:46:01", "size": 0} - {"type": "d", "mode": "drwxr-xr-x", "user": "user", "group": "user", "uid": 1000, "gid": 1000, "path": "linux/baz", "healthy": true, "source": "", "linktarget": "", "flags": null, "isomtime": "Sat, 2016-05-07 19:46:01", "size": 0} + {"type": "d", "mode": "drwxr-xr-x", "user": "user", "group": "user", "uid": 1000, "gid": 1000, "path": "linux", "healthy": true, "source": "", "linktarget": "", "flags": null, "mtime": "2017-02-27T12:27:20.023407", "size": 0} + {"type": "d", "mode": "drwxr-xr-x", "user": "user", "group": "user", "uid": 1000, "gid": 1000, "path": "linux/baz", "healthy": true, "source": "", "linktarget": "", "flags": null, "mtime": "2017-02-27T12:27:20.585407", "size": 0} + +Archive Differencing +++++++++++++++++++++ + +Each archive difference item (file contents, user/group/mode) output by :ref:`borg_diff` is represented by an *ItemDiff* object. 
+The properties of an *ItemDiff* object are: + +path: + The filename/path of the *Item* (file, directory, symlink). + +changes: + A list of *Change* objects describing the changes made to the item in the two archives. For example, + there will be two changes if the contents of a file are changed, and its ownership is changed. + +The *Change* object can contain a number of properties depending on the type of change that occurred. +If a 'property' is not required for the type of change, it is not output. +The possible properties of a *Change* object are: + +type: + The **type** property is always present. It identifies the type of change and will be one of these values: + + - *modified* - file contents changed. + - *added* - the file was added. + - *removed* - the file was removed. + - *added directory* - the directory was added. + - *removed directory* - the directory was removed. + - *added link* - the symlink was added. + - *removed link* - the symlink was removed. + - *changed link* - the symlink target was changed. + - *mode* - the file/directory/link mode was changed. Note - this could indicate a change from a + file/directory/link type to a different type (file/directory/link), such as -- a file is deleted and replaced + with a directory of the same name. + - *owner* - user and/or group ownership changed. + +size: + If **type** == '*added*' or '*removed*', then **size** provides the size of the added or removed file. + +added: + If **type** == '*modified*' and chunk ids can be compared, then **added** and **removed** indicate the amount + of data 'added' and 'removed'. If chunk ids can not be compared, then **added** and **removed** properties are + not provided and the only information available is that the file contents were modified. + +old_mode: + If **type** == '*mode*', then **old_mode** and **new_mode** provide the mode and permissions changes. + +new_mode: + See **old_mode** property.
+ +old_user: + If **type** == '*owner*', then **old_user**, **new_user**, **old_group** and **new_group** provide the user + and group ownership changes. + +old_group: + See **old_user** property. + +new_user: + See **old_user** property. + +new_group: + See **old_user** property. + + +Example (excerpt) of ``borg diff --json-lines``:: + + {"path": "file1", "changes": [{"type": "modified", "added": 17, "removed": 5}, {"type": "mode", "old_mode": "-rw-r--r--", "new_mode": "-rwxr-xr-x"}]} + {"path": "file2", "changes": [{"type": "modified", "added": 135, "removed": 252}]} + {"path": "file4", "changes": [{"type": "added", "size": 0}]} + {"path": "file3", "changes": [{"type": "removed", "size": 0}]} + .. _msgid: @@ -499,6 +614,8 @@ Errors Insufficient free space to complete transaction (required: {}, available: {}). Repository.InvalidRepository {} is not a valid repository. Check repo config. + Repository.AtticRepository + Attic repository detected. Please run "borg upgrade {}". Repository.ObjectNotFound Object with key {} not found in repository {}. @@ -511,13 +628,17 @@ Operations *info* is one string element, the name of the archive currently synced. - repository.compact_segments - repository.replay_segments - - repository.check_segments + - repository.check - check.verify_data + - check.rebuild_manifest - extract *info* is one string element, the name of the path currently extracted. - extract.permissions - archive.delete + - archive.calc_stats + - prune + - upgrade.convert_segments Prompts BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK @@ -525,8 +646,6 @@ Prompts BORG_RELOCATED_REPO_ACCESS_IS_OK For "Warning: The repository at location ... was previously located at ..." BORG_CHECK_I_KNOW_WHAT_I_AM_DOING - For "Warning: 'check --repair' is an experimental feature that might result in data loss." + For "This is a potentially dangerous function..."
(check --repair) BORG_DELETE_I_KNOW_WHAT_I_AM_DOING For "You requested to completely DELETE the repository *including* all archives it contains:" - BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING - For "recreate is an experimental feature." diff --git a/docs/internals/security.rst b/docs/internals/security.rst index 66a7ea5e13..57f8cfcb74 100644 --- a/docs/internals/security.rst +++ b/docs/internals/security.rst @@ -13,6 +13,8 @@ Security Cryptography in Borg ==================== +.. _attack_model: + Attack model ------------ @@ -37,6 +39,10 @@ Under these circumstances Borg guarantees that the attacker cannot The attacker can always impose a denial of service per definition (he could forbid connections to the repository, or delete it entirely). +When the above attack model is extended to include multiple clients +independently updating the same repository, then Borg fails to provide +confidentiality (i.e. guarantees 3) and 4) do not apply any more). + .. _security_structural_auth: Structural Authentication @@ -103,7 +109,7 @@ the tampering. Note that when using BORG_PASSPHRASE the attacker cannot swap the *entire* repository against a new repository with e.g. repokey mode and no passphrase, -because Borg will abort access when BORG_PASSPRHASE is incorrect. +because Borg will abort access when BORG_PASSPHRASE is incorrect. However, interactively a user might not notice this kind of attack immediately, if she assumes that the reason for the absent passphrase @@ -199,7 +205,7 @@ the manifest (which will fail if the client has seen a more recent manifest or has a more recent nonce reservation). If the repository is untrusted, but a trusted synchronization channel exists between clients, the security database could be synchronized between them over -said trusted channel. This is not part of Borgs functionality. +said trusted channel. This is not part of Borg's functionality. .. 
[#] Using the :ref:`borg key migrate-to-repokey ` command a user can convert repositories created using Attic in "passphrase" @@ -369,3 +375,56 @@ while libssl implements TLS and related protocols. The latter is not used by Borg (cf. `Remote RPC protocol security`_, Borg itself does not implement any network access) and historically contained most vulnerabilities, especially critical ones. The static binaries released by the project contain neither libssl nor the Python ssl/_ssl modules. + +Compression and Encryption +========================== + +Combining encryption with compression can be insecure in some contexts (e.g. online protocols). + +There was some discussion about this in `github issue #1040`_ and for Borg some developers +concluded this is no problem at all, some concluded this is hard and extremely slow to exploit +and thus no problem in practice. + +No matter what, there is always the option not to use compression if you are worried about this. + +.. _github issue #1040: https://github.com/borgbackup/borg/issues/1040 + +Fingerprinting +============== + +Stored chunk sizes +------------------ + +A borg repository does not hide the size of the chunks it stores (size +information is needed to operate the repository). + +The chunks stored are the (compressed and encrypted) output of the chunker, +chunked according to the input data, the chunker's parameters and the secret +chunker seed (which all influence the chunk boundary positions). + +Small files below some specific threshold (default: 512kiB) result in only one +chunk (identical content / size as the original file), bigger files result in +multiple chunks. + +After chunking is done, compression, encryption and authentication are applied, +which influence the sizes of the chunks stored into the repository. 
+ +Within our attack model, an attacker possessing a specific set of files which +he assumes that the victim also possesses (and backups into the repository) +could try a brute force fingerprinting attack based on the chunk sizes in the +repository to prove his assumption. + +Stored chunk proximity +---------------------- + +Borg does not try to obfuscate order / proximity of files it discovers by +recursing through the filesystem. For performance reasons, we sort directory +contents in file inode order (not in file name alphabetical order), so order +fingerprinting is not useful for an attacker. + +But, when new files are close to each other (when looking at recursion / +scanning order), the resulting chunks will be also stored close to each other +in the resulting repository segment file(s). + +This might leak additional information for the chunk size fingerprinting +attack (see above). diff --git a/docs/man/borg-benchmark-crud.1 b/docs/man/borg-benchmark-crud.1 index c763aae31f..cafa43d91d 100644 --- a/docs/man/borg-benchmark-crud.1 +++ b/docs/man/borg-benchmark-crud.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-BENCHMARK-CRUD 1 "2017-06-18" "" "borg backup tool" +.TH BORG-BENCHMARK-CRUD 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-benchmark-crud \- Benchmark Create, Read, Update, Delete for archives. . @@ -32,20 +32,28 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] benchmark crud REPO PATH +borg [common options] benchmark crud [options] REPOSITORY PATH .SH DESCRIPTION .sp This command benchmarks borg CRUD (create, read, update, delete) operations. .sp It creates input data below the given PATH and backups this data into the given REPO. The REPO must already exist (it could be a fresh empty repo or an existing repo, the -command will create / read / update / delete some archives named borg\-test\-data* there.
+command will create / read / update / delete some archives named borg\-benchmark\-crud* there. .sp Make sure you have free space there, you\(aqll need about 1GB each (+ overhead). .sp If your repository is encrypted and borg needs a passphrase to unlock the key, use: +.INDENT 0.0 +.INDENT 3.5 .sp +.nf +.ft C BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH +.ft P +.fi +.UNINDENT +.UNINDENT .sp Measurements are done with different input file sizes and counts. The file contents are very artificial (either all zero or all random), @@ -60,13 +68,7 @@ C\-R\- == random files. no dedup, measuring throughput through all processing st .B R\- == borg extract (extract archive, dry\-run, do everything, but do not write files to disk) R\-Z\- == all zero files. Measuring heavily duplicated files. R\-R\- == random files. No duplication here, measuring throughput through all processing -.IP "System Message: ERROR/3 (docs/virtmanpage.rst:, line 56)" -Unexpected indentation. -.INDENT 7.0 -.INDENT 3.5 stages, except writing to disk. -.UNINDENT -.UNINDENT .TP .B U\- == borg create (2nd archive creation of unchanged input files, measure files cache speed) The throughput value is kind of virtual here, it does not actually read the file. @@ -86,8 +88,8 @@ See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP -.B REPO -repo to use for benchmark (must exist) +.B REPOSITORY +repository to use for benchmark (must exist) .TP .B PATH path were to create benchmark input data diff --git a/docs/man/borg-benchmark.1 b/docs/man/borg-benchmark.1 index 79e356ac14..b561628ebf 100644 --- a/docs/man/borg-benchmark.1 +++ b/docs/man/borg-benchmark.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-BENCHMARK 1 "2017-06-18" "" "borg backup tool" +.TH BORG-BENCHMARK 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-benchmark \- benchmark command . 
diff --git a/docs/man/borg-break-lock.1 b/docs/man/borg-break-lock.1 index 7b4291cd52..e6eb43f549 100644 --- a/docs/man/borg-break-lock.1 +++ b/docs/man/borg-break-lock.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-BREAK-LOCK 1 "2017-06-18" "" "borg backup tool" +.TH BORG-BREAK-LOCK 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-break-lock \- Break the repository lock (e.g. in case it was left by a dead borg. . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] break\-lock REPOSITORY +borg [common options] break\-lock [options] [REPOSITORY] .SH DESCRIPTION .sp This command breaks the repository and cache locks. diff --git a/docs/man/borg-change-passphrase.1 b/docs/man/borg-change-passphrase.1 index d5b3edbfa6..0b8049a1af 100644 --- a/docs/man/borg-change-passphrase.1 +++ b/docs/man/borg-change-passphrase.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-CHANGE-PASSPHRASE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-CHANGE-PASSPHRASE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-change-passphrase \- Change repository key file passphrase . @@ -32,11 +32,16 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] change\-passphrase REPOSITORY +borg [common options] change\-passphrase [options] [REPOSITORY] .SH DESCRIPTION .sp The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. +.sp +Please note that this command only changes the passphrase, but not any +secret protected by it (like e.g. encryption/MAC keys or chunker seed). +Thus, changing the passphrase after passphrase and borg key got compromised +does not protect future (nor past) backups to the same repository. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. 
diff --git a/docs/man/borg-check.1 b/docs/man/borg-check.1 index cf2996a2ac..386357f3b2 100644 --- a/docs/man/borg-check.1 +++ b/docs/man/borg-check.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-CHECK 1 "2017-06-18" "" "borg backup tool" +.TH BORG-CHECK 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-check \- Check repository consistency . @@ -32,28 +32,31 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] check REPOSITORY_OR_ARCHIVE +borg [common options] check [options] [REPOSITORY_OR_ARCHIVE] .SH DESCRIPTION .sp The check command verifies the consistency of a repository and the corresponding archives. .sp +check \-\-repair is a potentially dangerous function and might lead to data loss +(for kinds of corruption it is not capable of dealing with). BE VERY CAREFUL! +.sp First, the underlying repository data files are checked: .INDENT 0.0 .IP \(bu 2 -For all segments the segment magic (header) is checked +For all segments, the segment magic header is checked. .IP \(bu 2 -For all objects stored in the segments, all metadata (e.g. crc and size) and +For all objects stored in the segments, all metadata (e.g. CRC and size) and all data is read. The read data is checked by size and CRC. Bit rot and other types of accidental damage can be detected this way. .IP \(bu 2 -If we are in repair mode and a integrity error is detected for a segment, -we try to recover as many objects from the segment as possible. +In repair mode, if an integrity error is detected in a segment, try to recover +as many objects from the segment as possible. .IP \(bu 2 -In repair mode, it makes sure that the index is consistent with the data -stored in the segments. +In repair mode, make sure that the index is consistent with the data stored in +the segments. .IP \(bu 2 -If you use a remote repo server via ssh:, the repo check is executed on the -repo server without causing significant network traffic. 
+If checking a remote repo via \fBssh:\fP, the repo check is executed on the server +without causing significant network traffic. .IP \(bu 2 The repository check can be skipped using the \fB\-\-archives\-only\fP option. .UNINDENT @@ -64,26 +67,24 @@ Second, the consistency and correctness of the archive metadata is verified: Is the repo manifest present? If not, it is rebuilt from archive metadata chunks (this requires reading and decrypting of all metadata and data). .IP \(bu 2 -Check if archive metadata chunk is present. if not, remove archive from -manifest. +Check if archive metadata chunk is present; if not, remove archive from manifest. .IP \(bu 2 For all files (items) in the archive, for all chunks referenced by these -files, check if chunk is present. -If a chunk is not present and we are in repair mode, replace it with a same\-size -replacement chunk of zeros. -If a previously lost chunk reappears (e.g. via a later backup) and we are in -repair mode, the all\-zero replacement chunk will be replaced by the correct chunk. -This requires reading of archive and file metadata, but not data. +files, check if chunk is present. In repair mode, if a chunk is not present, +replace it with a same\-size replacement chunk of zeroes. If a previously lost +chunk reappears (e.g. via a later backup), in repair mode the all\-zero replacement +chunk will be replaced by the correct chunk. This requires reading of archive and +file metadata, but not data. .IP \(bu 2 -If we are in repair mode and we checked all the archives: delete orphaned -chunks from the repo. +In repair mode, when all the archives were checked, orphaned chunks are deleted +from the repo. One cause of orphaned chunks are input file related errors (like +read errors) in the archive creation process. 
.IP \(bu 2 -if you use a remote repo server via ssh:, the archive check is executed on -the client machine (because if encryption is enabled, the checks will require -decryption and this is always done client\-side, because key access will be -required). +If checking a remote repo via \fBssh:\fP, the archive check is executed on the +client machine because it requires decryption, and this is always done client\-side +as key access is needed. .IP \(bu 2 -The archive checks can be time consuming, they can be skipped using the +The archive checks can be time consuming; they can be skipped using the \fB\-\-repository\-only\fP option. .UNINDENT .sp @@ -91,9 +92,8 @@ The \fB\-\-verify\-data\fP option will perform a full integrity verification (as checking the CRC32 of the segment) of data, which means reading the data from the repository, decrypting and decompressing it. This is a cryptographic verification, which will detect (accidental) corruption. For encrypted repositories it is -tamper\-resistant as well, unless the attacker has access to the keys. -.sp -It is also very slow. +tamper\-resistant as well, unless the attacker has access to the keys. It is also very +slow. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. @@ -121,16 +121,16 @@ attempt to repair any inconsistencies found .B \-\-save\-space work slower, but using less space .UNINDENT -.SS filters +.SS Archive filters .INDENT 0.0 .TP -.B \-P\fP,\fB \-\-prefix +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP -.B \-a\fP,\fB \-\-glob\-archives +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. 
.TP -.B \-\-sort\-by +.BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N diff --git a/docs/man/borg-common.1 b/docs/man/borg-common.1 index f48ccb6c15..7f0fa2af1e 100644 --- a/docs/man/borg-common.1 +++ b/docs/man/borg-common.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-COMMON 1 "2017-06-18" "" "borg backup tool" +.TH BORG-COMMON 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-common \- Common options of Borg commands . @@ -60,8 +60,11 @@ show progress information .B \-\-log\-json Output one JSON object per log line instead of formatted text. .TP -.BI \-\-lock\-wait \ N -wait for the lock, but max. N seconds (default: 1). +.BI \-\-lock\-wait \ SECONDS +wait at most SECONDS for acquiring a repository/cache lock (default: 1). +.TP +.B \-\-bypass\-lock +Bypass locking mechanism .TP .B \-\-show\-version show/log the borg version @@ -69,16 +72,13 @@ show/log the borg version .B \-\-show\-rc show/log the return code (rc) .TP -.B \-\-no\-files\-cache -do not load/update the file metadata cache used to detect unchanged files -.TP .BI \-\-umask \ M set umask to M (local and remote, default: 0077) .TP .BI \-\-remote\-path \ PATH use PATH as borg executable on the remote (default: "borg") .TP -.BI \-\-remote\-ratelimit \ rate +.BI \-\-remote\-ratelimit \ RATE set remote network upload rate limit in kiByte/s (default: 0=unlimited) .TP .B \-\-consider\-part\-files @@ -86,6 +86,9 @@ treat part files like normal files (e.g. to list/extract them) .TP .BI \-\-debug\-profile \ FILE Write execution profile in Borg format into FILE. For local use a Python\-compatible file can be generated by suffixing FILE with ".pyprof". 
+.TP +.BI \-\-rsh \ RSH +Use this command to connect to the \(aqborg serve\(aq process (default: \(aqssh\(aq) .UNINDENT .SH SEE ALSO .sp diff --git a/docs/man/borg-compression.1 b/docs/man/borg-compression.1 index 3347a65820..905c5c1c0c 100644 --- a/docs/man/borg-compression.1 +++ b/docs/man/borg-compression.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-COMPRESSION 1 "2017-06-18" "" "borg backup tool" +.TH BORG-COMPRESSION 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-compression \- Details regarding compression . @@ -50,7 +50,13 @@ Valid compression specifiers are: Do not compress. .TP .B lz4 -Use lz4 compression. High speed, low compression. (default) +Use lz4 compression. Very high speed, very low compression. (default) +.TP +.B zstd[,L] +Use zstd ("zstandard") compression, a modern wide\-range algorithm. +If you do not explicitely give the compression level L (ranging from 1 +to 22), it will use level 3. +Archives compressed with zstd are not compatible with borg < 1.1.4. .TP .B zlib[,L] Use zlib ("gz") compression. Medium speed, medium compression. @@ -82,6 +88,8 @@ Examples: .nf .ft C borg create \-\-compression lz4 REPO::ARCHIVE data +borg create \-\-compression zstd REPO::ARCHIVE data +borg create \-\-compression zstd,10 REPO::ARCHIVE data borg create \-\-compression zlib REPO::ARCHIVE data borg create \-\-compression zlib,1 REPO::ARCHIVE data borg create \-\-compression auto,lzma,6 REPO::ARCHIVE data diff --git a/docs/man/borg-config.1 b/docs/man/borg-config.1 new file mode 100644 index 0000000000..76eca361d2 --- /dev/null +++ b/docs/man/borg-config.1 @@ -0,0 +1,110 @@ +.\" Man page generated from reStructuredText. +. +.TH BORG-CONFIG 1 "2021-03-22" "" "borg backup tool" +.SH NAME +borg-config \- get, set, and delete values in a repository or cache config file +. +.nr rst2man-indent-level 0 +. 
+.de1 rstReportMargin +\\$1 \\n[an-margin] +level \\n[rst2man-indent-level] +level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] +- +\\n[rst2man-indent0] +\\n[rst2man-indent1] +\\n[rst2man-indent2] +.. +.de1 INDENT +.\" .rstReportMargin pre: +. RS \\$1 +. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] +. nr rst2man-indent-level +1 +.\" .rstReportMargin post: +.. +.de UNINDENT +. RE +.\" indent \\n[an-margin] +.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] +.nr rst2man-indent-level -1 +.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] +.in \\n[rst2man-indent\\n[rst2man-indent-level]]u +.. +.SH SYNOPSIS +.sp +borg [common options] config [options] [REPOSITORY] [NAME] [VALUE] +.SH DESCRIPTION +.sp +This command gets and sets options in a local repository or cache config file. +For security reasons, this command only works on local repositories. +.sp +To delete a config value entirely, use \fB\-\-delete\fP\&. To list the values +of the configuration file or the default values, use \fB\-\-list\fP\&. To get an existing +key, pass only the key name. To set a key, pass both the key name and +the new value. Keys can be specified in the format "section.name" or +simply "name"; the section will default to "repository" and "cache" for +the repo and cache configs, respectively. +.sp +By default, borg config manipulates the repository config file. Using \fB\-\-cache\fP +edits the repository cache\(aqs config file instead. +.SH OPTIONS +.sp +See \fIborg\-common(1)\fP for common options of Borg commands.
+.SS arguments +.INDENT 0.0 +.TP +.B REPOSITORY +repository to configure +.TP +.B NAME +name of config key +.TP +.B VALUE +new value for key +.UNINDENT +.SS optional arguments +.INDENT 0.0 +.TP +.B \-c\fP,\fB \-\-cache +get and set values from the repo cache +.TP +.B \-d\fP,\fB \-\-delete +delete the key from the config file +.TP +.B \-l\fP,\fB \-\-list +list the configuration of the repo +.UNINDENT +.SH EXAMPLES +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +The repository & cache config files are some of the only directly manipulable +parts of a repository that aren\(aqt versioned or backed up, so be careful when +making changes! +.UNINDENT +.UNINDENT +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +# find cache directory +$ cd ~/.cache/borg/$(borg config /path/to/repo id) + +# reserve some space +$ borg config /path/to/repo additional_free_space 2G + +# make a repo append\-only +$ borg config /path/to/repo append_only 1 +.ft P +.fi +.UNINDENT +.UNINDENT +.SH SEE ALSO +.sp +\fIborg\-common(1)\fP +.SH AUTHOR +The Borg Collective +.\" Generated by docutils manpage writer. +. diff --git a/docs/man/borg-create.1 b/docs/man/borg-create.1 index ec8d7b525b..0e4c4ef98b 100644 --- a/docs/man/borg-create.1 +++ b/docs/man/borg-create.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-CREATE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-CREATE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-create \- Create new archive . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] create ARCHIVE PATH +borg [common options] create [options] ARCHIVE [PATH...] .SH DESCRIPTION .sp This command creates a backup archive containing all files found while recursively @@ -41,7 +41,8 @@ that means if relative paths are desired, the command has to be run from the cor directory. 
.sp When giving \(aq\-\(aq as path, borg will read data from standard input and create a -file \(aqstdin\(aq in the created archive from that data. +file \(aqstdin\(aq in the created archive from that data. See section \fIReading from +stdin\fP below for details. .sp The archive will consume almost no disk space for files or parts of files that have already been stored in other archives. @@ -53,10 +54,52 @@ checkpoints and treated in special ways. In the archive name, you may use the following placeholders: {now}, {utcnow}, {fqdn}, {hostname}, {user} and some others. .sp -To speed up pulling backups over sshfs and similar network file systems which do -not provide correct inode information the \fB\-\-ignore\-inode\fP flag can be used. This -potentially decreases reliability of change detection, while avoiding always reading -all files on these file systems. +Backup speed is increased by not reprocessing files that are already part of +existing archives and weren\(aqt modified. The detection of unmodified files is +done by comparing multiple file metadata values with previous values kept in +the files cache. +.sp +This comparison can operate in different modes as given by \fB\-\-files\-cache\fP: +.INDENT 0.0 +.IP \(bu 2 +ctime,size,inode (default) +.IP \(bu 2 +mtime,size,inode (default behaviour of borg versions older than 1.1.0rc4) +.IP \(bu 2 +ctime,size (ignore the inode number) +.IP \(bu 2 +mtime,size (ignore the inode number) +.IP \(bu 2 +rechunk,ctime (all files are considered modified \- rechunk, cache ctime) +.IP \(bu 2 +rechunk,mtime (all files are considered modified \- rechunk, cache mtime) +.IP \(bu 2 +disabled (disable the files cache, all files considered modified \- rechunk) +.UNINDENT +.sp +inode number: better safety, but often unstable on network filesystems +.sp +Normally, detecting file modifications will take inode information into +consideration to improve the reliability of file change detection. 
+This is problematic for files located on sshfs and similar network file +systems which do not provide stable inode numbers, such files will always +be considered modified. You can use modes without \fIinode\fP in this case to +improve performance, but reliability of change detection might be reduced. +.sp +ctime vs. mtime: safety vs. speed +.INDENT 0.0 +.IP \(bu 2 +ctime is a rather safe way to detect changes to a file (metadata and contents) +as it can not be set from userspace. But, a metadata\-only change will already +update the ctime, so there might be some unnecessary chunking/hashing even +without content changes. Some filesystems do not support ctime (change time). +E.g. doing a chown or chmod to a file will change its ctime. +.IP \(bu 2 +mtime usually works and only updates if file contents were changed. But mtime +can be arbitrarily set from userspace, e.g. to set mtime back to the same value +it had before a content change happened. This can be used maliciously as well as +well\-meant, but in both cases mtime based cache modes can be problematic. +.UNINDENT .sp The mount points of filesystems or filesystem snapshots should be the same for every creation of a new archive to ensure fast operation. This is because the file cache that @@ -67,7 +110,14 @@ The \fB\-\-progress\fP option shows (from left to right) Original, Compressed an (O, C and D, respectively), then the Number of files (N) processed so far, followed by the currently processed path. .sp +When using \fB\-\-stats\fP, you will get some statistics about how much data was +added \- the "This Archive" deduplicated size there is most interesting as that is +how much your repository will grow. Please note that the "All archives" stats refer to +the state after creation. Also, the \fB\-\-stats\fP and \fB\-\-dry\-run\fP options are mutually +exclusive because the data is not actually compressed and deduplicated during a dry run. 
+.sp See the output of the "borg help patterns" command for more help on exclude patterns. +.sp See the output of the "borg help placeholders" command for more help on placeholders. .SH OPTIONS .sp @@ -94,43 +144,61 @@ print statistics for the created archive output verbose list of items (files, dirs, ...) .TP .BI \-\-filter \ STATUSCHARS -only display items with the given status characters +only display items with the given status characters (see description) .TP .B \-\-json -output stats as JSON (implies \-\-stats) +output stats as JSON. Implies \fB\-\-stats\fP\&. .TP .B \-\-no\-cache\-sync -experimental: do not synchronize the cache. Implies \-\-no\-files\-cache. +experimental: do not synchronize the cache. Implies not using the files cache. +.TP +.B \-\-no\-files\-cache +do not load/update the file metadata cache used to detect unchanged files +.TP +.BI \-\-stdin\-name \ NAME +use NAME in archive for stdin data (default: "stdin") +.TP +.BI \-\-stdin\-user \ USER +set user USER in archive for stdin data (default: \(aqroot\(aq) +.TP +.BI \-\-stdin\-group \ GROUP +set group GROUP in archive for stdin data (default: \(aqroot\(aq) +.TP +.BI \-\-stdin\-mode \ M +set mode to M in archive for stdin data (default: 0660) .UNINDENT .SS Exclusion options .INDENT 0.0 .TP -.BI \-e \ PATTERN\fP,\fB \ \-\-exclude \ PATTERN +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP +.BI \-\-pattern \ PATTERN +experimental: include/exclude paths matching PATTERN +.TP +.BI \-\-patterns\-from \ PATTERNFILE +experimental: read include/exclude patterns from PATTERNFILE, one per line +.TP .B \-\-exclude\-caches -exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.brynosaurus.com/cachedir/spec.html\fP) +exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.bford.info/cachedir/spec.html\fP) .TP .BI \-\-exclude\-if\-present \ NAME 
exclude directories that are tagged by containing a filesystem object with the given NAME .TP .B \-\-keep\-exclude\-tags\fP,\fB \-\-keep\-tag\-files -if tag objects are specified with \-\-exclude\-if\-present, don\(aqt omit the tag objects themselves from the backup archive -.TP -.BI \-\-pattern \ PATTERN -experimental: include/exclude paths matching PATTERN +if tag objects are specified with \fB\-\-exclude\-if\-present\fP, don\(aqt omit the tag objects themselves from the backup archive .TP -.BI \-\-patterns\-from \ PATTERNFILE -experimental: read include/exclude patterns from PATTERNFILE, one per line +.B \-\-exclude\-nodump +exclude files flagged NODUMP .UNINDENT .SS Filesystem options .INDENT 0.0 .TP .B \-x\fP,\fB \-\-one\-file\-system -stay in the same file system and do not store mount points of other file systems +stay in the same file system and do not store mount points of other file systems. This might behave different from your expectations, see the docs. .TP .B \-\-numeric\-owner only store numeric user and group identifiers @@ -141,9 +209,24 @@ do not store atime into archive .B \-\-noctime do not store ctime into archive .TP +.B \-\-nobirthtime +do not store birthtime (creation date) into archive +.TP +.B \-\-nobsdflags +do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive +.TP +.B \-\-noacls +do not read and store ACLs into archive +.TP +.B \-\-noxattrs +do not read and store xattrs into archive +.TP .B \-\-ignore\-inode ignore inode data in the file metadata cache used to detect unchanged files. .TP +.BI \-\-files\-cache \ MODE +operate files cache in MODE. default: ctime,size,inode +.TP .B \-\-read\-special open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. 
.UNINDENT @@ -154,15 +237,15 @@ open and read block and char device files as well as FIFOs as if they were regul add a comment text to the archive .TP .BI \-\-timestamp \ TIMESTAMP -manually specify the archive creation date/time (UTC, yyyy\-mm\-ddThh:mm:ss format). alternatively, give a reference file/directory. +manually specify the archive creation date/time (UTC, yyyy\-mm\-ddThh:mm:ss format). Alternatively, give a reference file/directory. .TP -.BI \-c \ SECONDS\fP,\fB \ \-\-checkpoint\-interval \ SECONDS +.BI \-c \ SECONDS\fR,\fB \ \-\-checkpoint\-interval \ SECONDS write checkpoint every SECONDS seconds (Default: 1800) .TP .BI \-\-chunker\-params \ PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). default: 19,23,21,4095 .TP -.BI \-C \ COMPRESSION\fP,\fB \ \-\-compression \ COMPRESSION +.BI \-C \ COMPRESSION\fR,\fB \ \-\-compression \ COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. 
.UNINDENT .SH EXAMPLES @@ -190,7 +273,7 @@ $ borg create /path/to/repo::my\-files /home \e # Backup the root filesystem into an archive named "root\-YYYY\-MM\-DD" # use zlib compression (good, but slow) \- default is lz4 (fast, low compression ratio) -$ borg create \-C zlib,6 /path/to/repo::root\-{now:%Y\-%m\-%d} / \-\-one\-file\-system +$ borg create \-C zlib,6 \-\-one\-file\-system /path/to/repo::root\-{now:%Y\-%m\-%d} / # Backup a remote host locally ("pull" style) using sshfs $ mkdir sshfs\-mount @@ -208,21 +291,24 @@ $ borg create \-\-chunker\-params 10,23,16,4095 /path/to/repo::small /smallstuff # Backup a raw device (must not be active/in use/mounted at that time) $ dd if=/dev/sdx bs=10M | borg create /path/to/repo::my\-sdx \- -# No compression (default) -$ borg create /path/to/repo::arch ~ +# No compression (none) +$ borg create \-\-compression none /path/to/repo::arch ~ -# Super fast, low compression -$ borg create \-\-compression lz4 /path/to/repo::arch ~ +# Super fast, low compression (lz4, default) +$ borg create /path/to/repo::arch ~ -# Less fast, higher compression (N = 0..9) +# Less fast, higher compression (zlib, N = 0..9) $ borg create \-\-compression zlib,N /path/to/repo::arch ~ -# Even slower, even higher compression (N = 0..9) +# Even slower, even higher compression (lzma, N = 0..9) $ borg create \-\-compression lzma,N /path/to/repo::arch ~ +# Only compress compressible data with lzma,N (N = 0..9) +$ borg create \-\-compression auto,lzma,N /path/to/repo::arch ~ + # Use short hostname, user name and current time in archive name $ borg create /path/to/repo::{hostname}\-{user}\-{now} ~ -# Similar, use the same datetime format as borg 1.1 will have as default +# Similar, use the same datetime format that is default as of borg 1.1 $ borg create /path/to/repo::{hostname}\-{user}\-{now:%Y\-%m\-%dT%H:%M:%S} ~ # As above, but add nanoseconds $ borg create /path/to/repo::{hostname}\-{user}\-{now:%Y\-%m\-%dT%H:%M:%S.%f} ~ @@ -250,6 +336,17 @@ all of its 
contents will be omitted from the backup. If, however, you wish to only include the objects specified by \fB\-\-exclude\-if\-present\fP in your backup, and not include any other contents of the containing folder, this can be enabled through using the \fB\-\-keep\-exclude\-tags\fP option. +.sp +The \fB\-x\fP or \fB\-\-one\-file\-system\fP option excludes directories that are mountpoints (and everything in them). +It detects mountpoints by comparing the device number from the output of \fBstat()\fP of the directory and its +parent directory. Specifically, it excludes directories for which \fBstat()\fP reports a device number different +from the device number of their parent. Be aware that in Linux (and possibly elsewhere) there are directories +with device number different from their parent, which the kernel does not consider a mountpoint and also the +other way around. Examples are bind mounts (possibly same device number, but always a mountpoint) and ALL +subvolumes of a btrfs (different device number from parent but not necessarily a mountpoint). Therefore when +using \fB\-\-one\-file\-system\fP, one should make doubly sure that the backup works as intended especially when using +btrfs. This is even more important if the btrfs layout was created by someone else, e.g. a distribution +installer. .SS Item flags .sp \fB\-\-list\fP outputs a list of all files, directories and other @@ -304,6 +401,31 @@ Other flags used include: .IP \(bu 2 \(aq?\(aq = missing status code (if you see this, please file a bug report!) .UNINDENT +.SS Reading from stdin +.sp +To read from stdin, specify \fB\-\fP as path and pipe directly to borg: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +backup\-vm \-\-id myvm \-\-stdout | borg create REPO::ARCHIVE \- +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Note that piping to borg creates an archive even if the command piping +to borg exits with a failure. In this case, \fBone can end up with +truncated output being backed up\fP\&.
+.sp +Reading from stdin yields just a stream of data without file metadata +associated with it, and the files cache is not needed at all. So it is +safe to disable it via \fB\-\-no\-files\-cache\fP and speed up backup +creation a bit. +.sp +By default, the content read from stdin is stored in a file called \(aqstdin\(aq. +Use \fB\-\-stdin\-name\fP to change the name. .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-delete(1)\fP, \fIborg\-prune(1)\fP, \fIborg\-check(1)\fP, \fIborg\-patterns(1)\fP, \fIborg\-placeholders(1)\fP, \fIborg\-compression(1)\fP diff --git a/docs/man/borg-delete.1 b/docs/man/borg-delete.1 index 2e8891532b..8d0485869c 100644 --- a/docs/man/borg-delete.1 +++ b/docs/man/borg-delete.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-DELETE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-DELETE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-delete \- Delete an existing repository or archives . @@ -32,46 +32,57 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] delete TARGET +borg [common options] delete [options] [REPOSITORY_OR_ARCHIVE] [ARCHIVE...] .SH DESCRIPTION .sp This command deletes an archive from the repository or the complete repository. Disk space is reclaimed accordingly. If you delete the complete repository, the local cache for it (if any) is also deleted. +.sp +When using \fB\-\-stats\fP, you will get some statistics about how much data was +deleted \- the "Deleted data" deduplicated size there is most interesting as +that is how much your repository will shrink. +Please note that the "All archives" stats refer to the state after deletion. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. 
.SS arguments .INDENT 0.0 .TP -.B TARGET -archive or repository to delete +.B REPOSITORY_OR_ARCHIVE +repository or archive to delete +.TP +.B ARCHIVE +archives to delete .UNINDENT .SS optional arguments .INDENT 0.0 .TP +.B \-n\fP,\fB \-\-dry\-run +do not change repository +.TP .B \-s\fP,\fB \-\-stats print statistics for the deleted archive .TP -.B \-c\fP,\fB \-\-cache\-only +.B \-\-cache\-only delete only the local cache for the given repository .TP .B \-\-force -force deletion of corrupted archives, use \-\-force \-\-force in case \-\-force does not work. +force deletion of corrupted archives, use \fB\-\-force \-\-force\fP in case \fB\-\-force\fP does not work. .TP .B \-\-save\-space work slower, but using less space .UNINDENT -.SS filters +.SS Archive filters .INDENT 0.0 .TP -.B \-P\fP,\fB \-\-prefix +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP -.B \-a\fP,\fB \-\-glob\-archives +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP -.B \-\-sort\-by +.BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N diff --git a/docs/man/borg-diff.1 b/docs/man/borg-diff.1 index ad030a6728..4e6f8a6495 100644 --- a/docs/man/borg-diff.1 +++ b/docs/man/borg-diff.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-DIFF 1 "2017-06-18" "" "borg backup tool" +.TH BORG-DIFF 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-diff \- Diff contents of two archives . @@ -32,12 +32,12 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] diff REPO_ARCHIVE1 ARCHIVE2 PATH +borg [common options] diff [options] REPO::ARCHIVE1 ARCHIVE2 [PATH...] 
.SH DESCRIPTION .sp This command finds differences (file contents, user/group/mode) between archives. .sp -A repository location and an archive name must be specified for REPO_ARCHIVE1. +A repository location and an archive name must be specified for REPO::ARCHIVE1. ARCHIVE2 is just another archive name in same repository (no repository location allowed). .sp @@ -57,7 +57,7 @@ See \fIborg\-common(1)\fP for common options of Borg commands. .SS arguments .INDENT 0.0 .TP -.B REPO_ARCHIVE1 +.B REPO::ARCHIVE1 repository location and ARCHIVE1 name .TP .B ARCHIVE2 @@ -77,25 +77,19 @@ Override check of chunker parameters. .TP .B \-\-sort Sort the output lines by file path. +.TP +.B \-\-json\-lines +Format output as JSON Lines. .UNINDENT .SS Exclusion options .INDENT 0.0 .TP -.BI \-e \ PATTERN\fP,\fB \ \-\-exclude \ PATTERN +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP -.B \-\-exclude\-caches -exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.brynosaurus.com/cachedir/spec.html\fP) -.TP -.BI \-\-exclude\-if\-present \ NAME -exclude directories that are tagged by containing a filesystem object with the given NAME -.TP -.B \-\-keep\-exclude\-tags\fP,\fB \-\-keep\-tag\-files -if tag objects are specified with \-\-exclude\-if\-present, don\(aqt omit the tag objects themselves from the backup archive -.TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP @@ -120,6 +114,7 @@ $ chmod a+x file1 $ echo "something" >> file2 $ borg create ../testrepo::archive2 . +$ echo "testing 123" >> file1 $ rm file3 $ touch file4 $ borg create ../testrepo::archive3 . 
@@ -130,14 +125,21 @@ $ borg diff testrepo::archive1 archive2 +135 B \-252 B file2 $ borg diff testrepo::archive2 archive3 + +17 B \-5 B file1 added 0 B file4 removed 0 B file3 $ borg diff testrepo::archive1 archive3 -[\-rw\-r\-\-r\-\- \-> \-rwxr\-xr\-x] file1 + +17 B \-5 B [\-rw\-r\-\-r\-\- \-> \-rwxr\-xr\-x] file1 +135 B \-252 B file2 added 0 B file4 removed 0 B file3 + +$ borg diff \-\-json\-lines testrepo::archive1 archive3 +{"path": "file1", "changes": [{"type": "modified", "added": 17, "removed": 5}, {"type": "mode", "old_mode": "\-rw\-r\-\-r\-\-", "new_mode": "\-rwxr\-xr\-x"}]} +{"path": "file2", "changes": [{"type": "modified", "added": 135, "removed": 252}]} +{"path": "file4", "changes": [{"type": "added", "size": 0}]} +{"path": "file3", "changes": [{"type": "removed", "size": 0}]} .ft P .fi .UNINDENT diff --git a/docs/man/borg-export-tar.1 b/docs/man/borg-export-tar.1 index 515b4d849c..726009e815 100644 --- a/docs/man/borg-export-tar.1 +++ b/docs/man/borg-export-tar.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-EXPORT-TAR 1 "2017-06-18" "" "borg backup tool" +.TH BORG-EXPORT-TAR 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-export-tar \- Export archive contents as a tarball . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] export\-tar ARCHIVE FILE PATH +borg [common options] export\-tar [options] ARCHIVE FILE [PATH...] .SH DESCRIPTION .sp This command creates a tarball from an archive. @@ -95,8 +95,11 @@ filter program to pipe data through .TP .B \-\-list output verbose list of items (files, dirs, ...)
+.UNINDENT +.SS Exclusion options +.INDENT 0.0 .TP -.BI \-e \ PATTERN\fP,\fB \ \-\-exclude \ PATTERN +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE @@ -109,7 +112,7 @@ experimental: include/exclude paths matching PATTERN experimental: read include/exclude patterns from PATTERNFILE, one per line .TP .BI \-\-strip\-components \ NUMBER -Remove the specified number of leading path elements. Pathnames with fewer elements will be silently skipped. +Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. .UNINDENT .SH EXAMPLES .INDENT 0.0 @@ -124,11 +127,14 @@ $ borg export\-tar /path/to/repo::Monday Monday.tar $ borg export\-tar /path/to/repo::Monday Monday.tar.gz \-\-exclude \(aq*.so\(aq # use higher compression level with gzip -$ borg export\-tar testrepo::linux \-\-tar\-filter="gzip \-9" Monday.tar.gz +$ borg export\-tar \-\-tar\-filter="gzip \-9" testrepo::linux Monday.tar.gz -# export a gzipped tar, but instead of storing it on disk, +# export a tar, but instead of storing it on disk, # upload it to a remote site using curl. -$ borg export\-tar ... \-\-tar\-filter="gzip" \- | curl \-\-data\-binary @\- https://somewhere/to/POST +$ borg export\-tar /path/to/repo::Monday \- | curl \-\-data\-binary @\- https://somewhere/to/POST + +# remote extraction via "tarpipe" +$ borg export\-tar /path/to/repo::Monday \- | ssh somewhere "cd extracted; tar x" .ft P .fi .UNINDENT diff --git a/docs/man/borg-extract.1 b/docs/man/borg-extract.1 index 13a71ab721..4a02aff564 100644 --- a/docs/man/borg-extract.1 +++ b/docs/man/borg-extract.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-EXTRACT 1 "2017-06-18" "" "borg backup tool" +.TH BORG-EXTRACT 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-extract \- Extract archive contents . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. 
.SH SYNOPSIS .sp -borg [common options] extract ARCHIVE PATH +borg [common options] extract [options] ARCHIVE [PATH...] .SH DESCRIPTION .sp This command extracts the contents of an archive. By default the entire @@ -48,6 +48,14 @@ decrypting, decompressing. .sp \fB\-\-progress\fP can be slower than no progress display, since it makes one additional pass over the archive metadata. +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +Currently, extract always writes into the current working directory ("."), +so make sure you \fBcd\fP to the right place before calling \fBborg extract\fP\&. +.UNINDENT +.UNINDENT .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. @@ -69,7 +77,28 @@ output verbose list of items (files, dirs, ...) .B \-n\fP,\fB \-\-dry\-run do not actually change any files .TP -.BI \-e \ PATTERN\fP,\fB \ \-\-exclude \ PATTERN +.B \-\-numeric\-owner +only obey numeric user and group identifiers +.TP +.B \-\-nobsdflags +do not extract/set bsdflags (e.g. NODUMP, IMMUTABLE) +.TP +.B \-\-noacls +do not extract/set ACLs +.TP +.B \-\-noxattrs +do not extract/set xattrs +.TP +.B \-\-stdout +write all extracted data to stdout +.TP +.B \-\-sparse +create holes in output sparse file from all\-zero chunks +.UNINDENT +.SS Exclusion options +.INDENT 0.0 +.TP +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE @@ -81,17 +110,8 @@ experimental: include/exclude paths matching PATTERN .BI \-\-patterns\-from \ PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line .TP -.B \-\-numeric\-owner -only obey numeric user and group identifiers -.TP .BI \-\-strip\-components \ NUMBER -Remove the specified number of leading path elements. Pathnames with fewer elements will be silently skipped. 
-.TP -.B \-\-stdout -write all extracted data to stdout -.TP -.B \-\-sparse -create holes in output sparse file from all\-zero chunks +Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. .UNINDENT .SH EXAMPLES .INDENT 0.0 @@ -120,14 +140,6 @@ $ borg extract \-\-stdout /path/to/repo::my\-sdx | dd of=/dev/sdx bs=10M .fi .UNINDENT .UNINDENT -.sp -\fBNOTE:\fP -.INDENT 0.0 -.INDENT 3.5 -Currently, extract always writes into the current working directory ("."), -so make sure you \fBcd\fP to the right place before calling \fBborg extract\fP\&. -.UNINDENT -.UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-mount(1)\fP diff --git a/docs/man/borg-info.1 b/docs/man/borg-info.1 index 338d3ca603..cb76a2681f 100644 --- a/docs/man/borg-info.1 +++ b/docs/man/borg-info.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-INFO 1 "2017-06-18" "" "borg backup tool" +.TH BORG-INFO 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-info \- Show archive details such as disk space used . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] info REPOSITORY_OR_ARCHIVE +borg [common options] info [options] [REPOSITORY_OR_ARCHIVE] .SH DESCRIPTION .sp This command displays detailed information about the specified archive or repository. @@ -40,14 +40,16 @@ This command displays detailed information about the specified archive or reposi Please note that the deduplicated sizes of the individual archives do not add up to the deduplicated size of the repository ("all archives"), because the two are meaning different things: -.INDENT 0.0 -.TP -.B This archive / deduplicated size = amount of data stored ONLY for this archive +.sp +This archive / deduplicated size = amount of data stored ONLY for this archive = unique chunks of this archive. 
-.TP -.B All archives / deduplicated size = amount of data stored in the repo +All archives / deduplicated size = amount of data stored in the repo = all chunks in the repository. -.UNINDENT +.sp +Borg archives can only contain a limited amount of file metadata. +The size of an archive relative to this limit depends on a number of factors, +mainly the number of files, the lengths of paths and other metadata stored for files. +This is shown as \fIutilization of maximum supported archive size\fP\&. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. @@ -55,7 +57,7 @@ See \fIborg\-common(1)\fP for common options of Borg commands. .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE -archive or repository to display information about +repository or archive to display information about .UNINDENT .SS optional arguments .INDENT 0.0 @@ -63,16 +65,16 @@ archive or repository to display information about .B \-\-json format output as JSON .UNINDENT -.SS filters +.SS Archive filters .INDENT 0.0 .TP -.B \-P\fP,\fB \-\-prefix +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP -.B \-a\fP,\fB \-\-glob\-archives +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. 
.TP -.B \-\-sort\-by +.BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N @@ -87,22 +89,58 @@ consider last N archives after other filters were applied .sp .nf .ft C -$ borg info /path/to/repo::root\-2016\-02\-15 -Name: root\-2016\-02\-15 -Fingerprint: 57c827621f21b000a8d363c1e163cc55983822b3afff3a96df595077a660be50 +$ borg info /path/to/repo::2017\-06\-29T11:00\-srv +Archive name: 2017\-06\-29T11:00\-srv +Archive fingerprint: b2f1beac2bd553b34e06358afa45a3c1689320d39163890c5bbbd49125f00fe5 +Comment: Hostname: myhostname Username: root -Time (start): Mon, 2016\-02\-15 19:36:29 -Time (end): Mon, 2016\-02\-15 19:39:26 -Command line: /usr/local/bin/borg create \-\-list \-C zlib,6 /path/to/repo::root\-2016\-02\-15 / \-\-one\-file\-system -Number of files: 38100 +Time (start): Thu, 2017\-06\-29 11:03:07 +Time (end): Thu, 2017\-06\-29 11:03:13 +Duration: 5.66 seconds +Number of files: 17037 +Command line: /usr/sbin/borg create /path/to/repo::2017\-06\-29T11:00\-srv /srv +Utilization of max. archive size: 0% +\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- + Original size Compressed size Deduplicated size +This archive: 12.53 GB 12.49 GB 1.62 kB +All archives: 121.82 TB 112.41 TB 215.42 GB + + Unique chunks Total chunks +Chunk index: 1015213 626934122 + +$ borg info /path/to/repo \-\-last 1 +Archive name: 2017\-06\-29T11:00\-srv +Archive fingerprint: b2f1beac2bd553b34e06358afa45a3c1689320d39163890c5bbbd49125f00fe5 +Comment: +Hostname: myhostname +Username: root +Time (start): Thu, 2017\-06\-29 11:03:07 +Time (end): Thu, 2017\-06\-29 11:03:13 +Duration: 5.66 seconds +Number of files: 17037 +Command line: /usr/sbin/borg create /path/to/repo::2017\-06\-29T11:00\-srv /srv +Utilization of max. 
archive size: 0% +\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- + Original size Compressed size Deduplicated size +This archive: 12.53 GB 12.49 GB 1.62 kB +All archives: 121.82 TB 112.41 TB 215.42 GB + + Unique chunks Total chunks +Chunk index: 1015213 626934122 +$ borg info /path/to/repo +Repository ID: d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 +Location: /path/to/repo +Encrypted: Yes (repokey) +Cache: /root/.cache/borg/d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 +Security dir: /root/.config/borg/security/d857ce5788c51272c61535062e89eac4e8ef5a884ffbe976e0af9d8765dedfa5 +\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\-\- Original size Compressed size Deduplicated size -This archive: 1.33 GB 613.25 MB 571.64 MB -All archives: 1.63 GB 853.66 MB 584.12 MB +All archives: 121.82 TB 112.41 TB 215.42 GB Unique chunks Total chunks -Chunk index: 36858 48844 +Chunk index: 1015213 626934122 .ft P .fi .UNINDENT diff --git a/docs/man/borg-init.1 b/docs/man/borg-init.1 index 9576afa8d7..5b5796f846 100644 --- a/docs/man/borg-init.1 +++ b/docs/man/borg-init.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-INIT 1 "2017-06-18" "" "borg backup tool" +.TH BORG-INIT 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-init \- Initialize an empty repository . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] init REPOSITORY +borg [common options] init [options] [REPOSITORY] .SH DESCRIPTION .sp This command initializes an empty repository. A repository is a filesystem @@ -43,7 +43,11 @@ Encryption can be enabled at repository init time. It cannot be changed later. It is not recommended to work without encryption. 
Repository encryption protects you e.g. against the case that an attacker has access to your backup repository. .sp -But be careful with the key / the passphrase: +Borg relies on randomly generated key material and uses that for chunking, id +generation, encryption and authentication. The key material is encrypted using +the passphrase you give before it is stored on\-disk. +.sp +You need to be careful with the key / the passphrase: .sp If you want "passphrase\-only" security, use one of the repokey modes. The key will be stored inside the repository (in its "config" file). In above @@ -81,6 +85,12 @@ a different keyboard layout. You can change your passphrase for existing repos at any time, it won\(aqt affect the encryption/decryption key or other secrets. .SS Encryption modes +.sp +You can choose from the encryption modes seen in the table below on a per\-repo +basis. The mode determines encryption algorithm, hash/MAC algorithm and also the +key storage location. +.sp +Example: \fIborg init \-\-encryption repokey ...\fP .\" nanorst: inline-fill . .TS @@ -126,7 +136,8 @@ _ .\" nanorst: inline-replace . .sp -\fIMarked modes\fP are new in Borg 1.1 and are not backwards\-compatible with Borg 1.0.x. +Modes \fImarked like this\fP in the above table are new in Borg 1.1 and are not +backwards\-compatible with Borg 1.0.x. .sp On modern Intel/AMD CPUs (except very cheap ones), AES is usually hardware\-accelerated. @@ -159,8 +170,8 @@ from the other blake2 modes. This mode is new and \fInot\fP compatible with Borg 1.0.x. .sp \fInone\fP mode uses no encryption and no authentication. It uses SHA256 as chunk -ID hash. Not recommended, rather consider using an authenticated or -authenticated/encrypted mode. This mode has possible denial\-of\-service issues +ID hash. This mode is not recommended, you should rather consider using an authenticated +or authenticated/encrypted mode. 
This mode has possible denial\-of\-service issues when running \fBborg create\fP on contents controlled by an attacker. Use it only for new repositories where no encryption is wanted \fBand\fP when compatibility with 1.0.x is important. If compatibility with 1.0.x is not important, use @@ -178,14 +189,17 @@ repository to create .SS optional arguments .INDENT 0.0 .TP -.B \-e\fP,\fB \-\-encryption +.BI \-e \ MODE\fR,\fB \ \-\-encryption \ MODE select encryption key mode \fB(required)\fP .TP .B \-\-append\-only create an append\-only mode repository .TP -.B \-\-storage\-quota +.BI \-\-storage\-quota \ QUOTA Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. +.TP +.B \-\-make\-parent\-dirs +create the parent directories of the repository directory, if they are missing. .UNINDENT .SH EXAMPLES .INDENT 0.0 @@ -200,9 +214,11 @@ $ borg init \-\-encryption=repokey\-blake2 /path/to/repo $ borg init \-\-encryption=none /path/to/repo # Remote repository (accesses a remote borg via ssh) +# repokey: stores the (encrypted) key into /config $ borg init \-\-encryption=repokey\-blake2 user@hostname:backup -# Remote repository (store the key your home dir) +# Remote repository (accesses a remote borg via ssh) +# keyfile: stores the (encrypted) key into ~/.config/borg/keys/ $ borg init \-\-encryption=keyfile user@hostname:backup .ft P .fi diff --git a/docs/man/borg-key-change-passphrase.1 b/docs/man/borg-key-change-passphrase.1 index 88d1b2c1a0..7b1c2ca5c7 100644 --- a/docs/man/borg-key-change-passphrase.1 +++ b/docs/man/borg-key-change-passphrase.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-KEY-CHANGE-PASSPHRASE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-KEY-CHANGE-PASSPHRASE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-key-change-passphrase \- Change repository key file passphrase . @@ -32,11 +32,16 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. 
.SH SYNOPSIS .sp -borg [common options] key change\-passphrase REPOSITORY +borg [common options] key change\-passphrase [options] [REPOSITORY] .SH DESCRIPTION .sp The key files used for repository encryption are optionally passphrase protected. This command can be used to change this passphrase. +.sp +Please note that this command only changes the passphrase, but not any +secret protected by it (like e.g. encryption/MAC keys or chunker seed). +Thus, changing the passphrase after passphrase and borg key got compromised +does not protect future (nor past) backups to the same repository. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. diff --git a/docs/man/borg-key-export.1 b/docs/man/borg-key-export.1 index 236886831f..4675feb58b 100644 --- a/docs/man/borg-key-export.1 +++ b/docs/man/borg-key-export.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-KEY-EXPORT 1 "2017-06-18" "" "borg backup tool" +.TH BORG-KEY-EXPORT 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-key-export \- Export the repository key for backup . @@ -32,11 +32,14 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] key export REPOSITORY PATH +borg [common options] key export [options] [REPOSITORY] [PATH] .SH DESCRIPTION .sp If repository encryption is used, the repository is inaccessible without the key. This command allows to backup this essential key. +Note that the backup produced does not include the passphrase itself +(i.e. the exported key stays encrypted). In order to regain access to a +repository, one needs both the exported key and the original passphrase. .sp There are two backup formats. The normal backup format is suitable for digital storage as a file. 
The \fB\-\-paper\fP backup format is optimized diff --git a/docs/man/borg-key-import.1 b/docs/man/borg-key-import.1 index 92a1754d89..3637cbceef 100644 --- a/docs/man/borg-key-import.1 +++ b/docs/man/borg-key-import.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-KEY-IMPORT 1 "2017-06-18" "" "borg backup tool" +.TH BORG-KEY-IMPORT 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-key-import \- Import the repository key from backup . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] key import REPOSITORY PATH +borg [common options] key import [options] [REPOSITORY] [PATH] .SH DESCRIPTION .sp This command allows to restore a key previously backed up with the @@ -50,7 +50,7 @@ REPOSITORY .INDENT 0.0 .TP .B PATH -path to the backup +path to the backup (\(aq\-\(aq to read from stdin) .UNINDENT .SS optional arguments .INDENT 0.0 diff --git a/docs/man/borg-key-migrate-to-repokey.1 b/docs/man/borg-key-migrate-to-repokey.1 index 0d408612e1..e2bef3671f 100644 --- a/docs/man/borg-key-migrate-to-repokey.1 +++ b/docs/man/borg-key-migrate-to-repokey.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-KEY-MIGRATE-TO-REPOKEY 1 "2017-06-18" "" "borg backup tool" +.TH BORG-KEY-MIGRATE-TO-REPOKEY 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-key-migrate-to-repokey \- Migrate passphrase -> repokey . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] key migrate\-to\-repokey REPOSITORY +borg [common options] key migrate\-to\-repokey [options] [REPOSITORY] .SH DESCRIPTION .sp This command migrates a repository from passphrase mode (removed in Borg 1.0) diff --git a/docs/man/borg-key.1 b/docs/man/borg-key.1 index 0915aa5fda..394ec33405 100644 --- a/docs/man/borg-key.1 +++ b/docs/man/borg-key.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . 
-.TH BORG-KEY 1 "2017-06-18" "" "borg backup tool" +.TH BORG-KEY 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-key \- Manage a keyfile or repokey of a repository . diff --git a/docs/man/borg-list.1 b/docs/man/borg-list.1 index 3bceff410b..0ba0564226 100644 --- a/docs/man/borg-list.1 +++ b/docs/man/borg-list.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-LIST 1 "2017-06-18" "" "borg backup tool" +.TH BORG-LIST 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-list \- List archive or repository contents . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] list REPOSITORY_OR_ARCHIVE PATH +borg [common options] list [options] [REPOSITORY_OR_ARCHIVE] [PATH...] .SH DESCRIPTION .sp This command lists the contents of a repository or an archive. @@ -45,7 +45,7 @@ See \fIborg\-common(1)\fP for common options of Borg commands. .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE -repository/archive to list contents of +repository or archive to list contents of .TP .B PATH paths to list; patterns are supported @@ -56,9 +56,8 @@ paths to list; patterns are supported .B \-\-short only print file/directory names, nothing else .TP -.B \-\-format\fP,\fB \-\-list\-format -specify format for file listing -(default: "{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NL}") +.BI \-\-format \ FORMAT\fR,\fB \ \-\-list\-format \ FORMAT +specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}") .TP .B \-\-json Only valid for listing repository contents. Format output as JSON. The form of \fB\-\-format\fP is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "barchive" key is therefore not available. @@ -66,16 +65,16 @@ Only valid for listing repository contents. Format output as JSON. The form of \ .B \-\-json\-lines Only valid for listing archive contents. 
Format output as JSON Lines. The form of \fB\-\-format\fP is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "bpath" key is therefore not available. .UNINDENT -.SS filters +.SS Archive filters .INDENT 0.0 .TP -.B \-P\fP,\fB \-\-prefix +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP -.B \-a\fP,\fB \-\-glob\-archives +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP -.B \-\-sort\-by +.BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N @@ -87,21 +86,12 @@ consider last N archives after other filters were applied .SS Exclusion options .INDENT 0.0 .TP -.BI \-e \ PATTERN\fP,\fB \ \-\-exclude \ PATTERN +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP -.B \-\-exclude\-caches -exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.brynosaurus.com/cachedir/spec.html\fP) -.TP -.BI \-\-exclude\-if\-present \ NAME -exclude directories that are tagged by containing a filesystem object with the given NAME -.TP -.B \-\-keep\-exclude\-tags\fP,\fB \-\-keep\-tag\-files -if tag objects are specified with \-\-exclude\-if\-present, don\(aqt omit the tag objects themselves from the backup archive -.TP .BI \-\-pattern \ PATTERN experimental: include/exclude paths matching PATTERN .TP @@ -129,7 +119,14 @@ lrwxrwxrwx root root 0 Fri, 2015\-03\-27 20:24:26 bin/bzcmp \-> bzdif \-rwxr\-xr\-x root root 2140 Fri, 2015\-03\-27 20:24:22 bin/bzdiff \&... 
-$ borg list /path/to/repo::archiveA \-\-list\-format="{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NEWLINE}" +$ borg list /path/to/repo::root\-2016\-02\-15 \-\-pattern "\- bin/ba*" +drwxr\-xr\-x root root 0 Mon, 2016\-02\-15 17:44:27 . +drwxrwxr\-x root root 0 Mon, 2016\-02\-15 19:04:49 bin +lrwxrwxrwx root root 0 Fri, 2015\-03\-27 20:24:26 bin/bzcmp \-> bzdiff +\-rwxr\-xr\-x root root 2140 Fri, 2015\-03\-27 20:24:22 bin/bzdiff +\&... + +$ borg list /path/to/repo::archiveA \-\-format="{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NEWLINE}" drwxrwxr\-x user user 0 Sun, 2015\-02\-01 11:00:00 . drwxrwxr\-x user user 0 Sun, 2015\-02\-01 11:00:00 code drwxrwxr\-x user user 0 Sun, 2015\-02\-01 11:00:00 code/myproject @@ -162,13 +159,27 @@ LF Keys for listing repository archives: .INDENT 0.0 .IP \(bu 2 -archive, name: archive name interpreted as text (might be missing non\-text characters, see barchive) +archive: archive name interpreted as text (might be missing non\-text characters, see barchive) +.IP \(bu 2 +name: alias of "archive" .IP \(bu 2 barchive: verbatim archive name, can contain any character except NUL .IP \(bu 2 -time: time of creation of the archive +comment: archive comment interpreted as text (might be missing non\-text characters, see bcomment) +.IP \(bu 2 +bcomment: verbatim archive comment, can contain any character except NUL .IP \(bu 2 id: internal ID of the archive +.IP \(bu 2 +start: time (start) of creation of the archive +.IP \(bu 2 +time: alias of "start" +.IP \(bu 2 +end: time (end) of creation of the archive +.IP \(bu 2 +hostname: hostname of host on which this archive was created +.IP \(bu 2 +username: username of user who created this archive .UNINDENT .sp Keys for listing archive files: @@ -220,6 +231,10 @@ isoctime .IP \(bu 2 isoatime .IP \(bu 2 +blake2b +.IP \(bu 2 +blake2s +.IP \(bu 2 md5 .IP \(bu 2 sha1 @@ -230,8 +245,20 @@ sha256 .IP \(bu 2 sha384 .IP \(bu 2 +sha3_224 +.IP \(bu 2 +sha3_256 +.IP \(bu 2 
+sha3_384 +.IP \(bu 2 +sha3_512 +.IP \(bu 2 sha512 .IP \(bu 2 +shake_128 +.IP \(bu 2 +shake_256 +.IP \(bu 2 archiveid .IP \(bu 2 archivename diff --git a/docs/man/borg-mount.1 b/docs/man/borg-mount.1 index d1892ac129..1162ddaa64 100644 --- a/docs/man/borg-mount.1 +++ b/docs/man/borg-mount.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-MOUNT 1 "2017-06-18" "" "borg backup tool" +.TH BORG-MOUNT 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-mount \- Mount archive or an entire repository as a FUSE filesystem . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] mount REPOSITORY_OR_ARCHIVE MOUNTPOINT +borg [common options] mount [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] .SH DESCRIPTION .sp This command mounts an archive as a FUSE filesystem. This can be useful for @@ -47,8 +47,9 @@ used in fstab entries: To allow a regular user to use fstab entries, add the \fBuser\fP option: \fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0\fP .sp -For mount options, see the fuse(8) manual page. Additional mount options -supported by borg: +For FUSE configuration and mount options, see the mount.fuse(8) manual page. +.sp +Additional mount options supported by borg: .INDENT 0.0 .IP \(bu 2 versions: when used with a repository mount, this gives a merged, versioned @@ -57,6 +58,10 @@ view of the files in the archives. EXPERIMENTAL, layout may change in future. allow_damaged_files: by default damaged files (where missing chunks were replaced with runs of zeros by borg check \fB\-\-repair\fP) are not readable and return EIO (I/O error). Set this option to read such files. +.IP \(bu 2 +ignore_permissions: for security reasons the "default_permissions" mount +option is internally enforced by borg. "ignore_permissions" can be given to +not enforce "default_permissions". 
.UNINDENT .sp The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users @@ -77,10 +82,13 @@ See \fIborg\-common(1)\fP for common options of Borg commands. .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE -repository/archive to mount +repository or archive to mount .TP .B MOUNTPOINT where to mount filesystem +.TP +.B PATH +paths to extract; patterns are supported .UNINDENT .SS optional arguments .INDENT 0.0 @@ -91,16 +99,16 @@ stay in foreground, do not daemonize .B \-o Extra mount options .UNINDENT -.SS filters +.SS Archive filters .INDENT 0.0 .TP -.B \-P\fP,\fB \-\-prefix +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP -.B \-a\fP,\fB \-\-glob\-archives +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .TP -.B \-\-sort\-by +.BI \-\-sort\-by \ KEYS Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp .TP .BI \-\-first \ N @@ -109,6 +117,24 @@ consider first N archives after other filters were applied .BI \-\-last \ N consider last N archives after other filters were applied .UNINDENT +.SS Exclusion options +.INDENT 0.0 +.TP +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN +exclude paths matching PATTERN +.TP +.BI \-\-exclude\-from \ EXCLUDEFILE +read exclude patterns from EXCLUDEFILE, one per line +.TP +.BI \-\-pattern \ PATTERN +experimental: include/exclude paths matching PATTERN +.TP +.BI \-\-patterns\-from \ PATTERNFILE +experimental: read include/exclude patterns from PATTERNFILE, one per line +.TP +.BI \-\-strip\-components \ NUMBER +Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. 
+.UNINDENT .SH SEE ALSO .sp \fIborg\-common(1)\fP, \fIborg\-umount(1)\fP, \fIborg\-extract(1)\fP diff --git a/docs/man/borg-patterns.1 b/docs/man/borg-patterns.1 index 239a441f0e..f5c6558171 100644 --- a/docs/man/borg-patterns.1 +++ b/docs/man/borg-patterns.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-PATTERNS 1 "2017-06-18" "" "borg backup tool" +.TH BORG-PATTERNS 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-patterns \- Details regarding patterns . @@ -32,13 +32,31 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH DESCRIPTION .sp +The path/filenames used as input for the pattern matching start from the +currently active recursion root. You usually give the recursion root(s) +when invoking borg and these can be either relative or absolute paths. +.sp +So, when you give \fIrelative/\fP as root, the paths going into the matcher +will look like \fIrelative/.../file.ext\fP\&. When you give \fI/absolute/\fP as +root, they will look like \fI/absolute/.../file.ext\fP\&. This is meant when +we talk about "full path" below. +.sp +File paths in Borg archives are always stored normalized and relative. +This means that e.g. \fBborg create /path/to/repo ../some/path\fP will +store all files as \fIsome/path/.../file.ext\fP and \fBborg create +/path/to/repo /home/user\fP will store all files as +\fIhome/user/.../file.ext\fP\&. Therefore, always use relative paths in your +patterns when matching archive content in commands like \fBextract\fP or +\fBmount\fP\&. Starting with Borg 1.2 this behaviour will be changed to +accept both absolute and relative paths. +.sp File patterns support these styles: fnmatch, shell, regular expressions, path prefixes and path full\-matches. By default, fnmatch is used for -\fB\-\-exclude\fP patterns and shell\-style is used for the experimental \fB\-\-pattern\fP -option. +\fB\-\-exclude\fP patterns and shell\-style is used for the experimental +\fB\-\-pattern\fP option. 
.sp -If followed by a colon (\(aq:\(aq) the first two characters of a pattern are used as a -style selector. Explicit style selection is necessary when a +If followed by a colon (\(aq:\(aq) the first two characters of a pattern are +used as a style selector. Explicit style selection is necessary when a non\-default style is desired or when the desired pattern starts with two alphanumeric characters followed by a colon (i.e. \fIaa:something/*\fP). .INDENT 0.0 @@ -49,17 +67,17 @@ These patterns use a variant of shell pattern syntax, with \(aq*\(aq matching any number of characters, \(aq?\(aq matching any single character, \(aq[...]\(aq matching any single character specified, including ranges, and \(aq[!...]\(aq matching any character not specified. For the purpose of these patterns, -the path separator (\(aq\(aq for Windows and \(aq/\(aq on other systems) is not +the path separator (backslash for Windows and \(aq/\(aq on other systems) is not treated specially. Wrap meta\-characters in brackets for a literal match (i.e. \fI[?]\fP to match the literal character \fI?\fP). For a path -to match a pattern, it must completely match from start to end, or -must match from the start to just before a path separator. Except +to match a pattern, the full path must match, or it must match +from the start of the full path to just before a path separator. Except for the root path, paths will never end in the path separator when matching is attempted. Thus, if a given pattern ends in a path separator, a \(aq*\(aq is appended before matching is attempted. .TP .B Shell\-style patterns, selector \fIsh:\fP -This is the default style for \-\-pattern and \-\-patterns\-from. +This is the default style for \fB\-\-pattern\fP and \fB\-\-patterns\-from\fP\&. Like fnmatch patterns these are similar to shell patterns. 
The difference is that the pattern may include \fI**/\fP for matching zero or more directory levels, \fI*\fP for matching zero or more arbitrary characters with the @@ -67,23 +85,23 @@ exception of any path separator. .TP .B Regular expressions, selector \fIre:\fP Regular expressions similar to those found in Perl are supported. Unlike -shell patterns regular expressions are not required to match the complete +shell patterns regular expressions are not required to match the full path and any substring match is sufficient. It is strongly recommended to anchor patterns to the start (\(aq^\(aq), to the end (\(aq$\(aq) or both. Path -separators (\(aq\(aq for Windows and \(aq/\(aq on other systems) in paths are +separators (backslash for Windows and \(aq/\(aq on other systems) in paths are always normalized to a forward slash (\(aq/\(aq) before applying a pattern. The regular expression syntax is described in the \fI\%Python documentation for the re module\fP\&. .TP .B Path prefix, selector \fIpp:\fP This pattern style is useful to match whole sub\-directories. The pattern -\fIpp:/data/bar\fP matches \fI/data/bar\fP and everything therein. +\fIpp:root/somedir\fP matches \fIroot/somedir\fP and everything therein. .TP .B Path full\-match, selector \fIpf:\fP -This pattern style is useful to match whole paths. +This pattern style is (only) useful to match full paths. This is kind of a pseudo pattern as it can not have any variable or -unspecified parts \- the full, precise path must be given. -\fIpf:/data/foo.txt\fP matches \fI/data/foo.txt\fP only. +unspecified parts \- the full path must be given. +\fIpf:root/file.ext\fP matches \fIroot/file.ext\fP only. .sp Implementation note: this is implemented via very time\-efficient O(1) hashtable lookups (this means you can have huge amounts of such patterns @@ -108,16 +126,19 @@ wildcards at most. .UNINDENT .sp Exclusions can be passed via the command line option \fB\-\-exclude\fP\&. 
When used -from within a shell the patterns should be quoted to protect them from +from within a shell, the patterns should be quoted to protect them from expansion. .sp The \fB\-\-exclude\-from\fP option permits loading exclusion patterns from a text file with one pattern per line. Lines empty or starting with the number sign (\(aq#\(aq) after removing whitespace on both ends are ignored. The optional style selector prefix is also supported for patterns loaded from a file. Due to -whitespace removal paths with whitespace at the beginning or end can only be +whitespace removal, paths with whitespace at the beginning or end can only be excluded using regular expressions. .sp +To test your exclusion patterns without performing an actual backup you can +run \fBborg create \-\-list \-\-dry\-run ...\fP\&. +.sp Examples: .INDENT 0.0 .INDENT 3.5 @@ -147,8 +168,10 @@ $ cat >exclude.txt < REPOSITORY +borg [common options] prune [options] [REPOSITORY] .SH DESCRIPTION .sp The prune command prunes a repository by deleting all archives not matching @@ -73,6 +73,11 @@ negative number of archives to keep means that there is no limit. The \fB\-\-keep\-last N\fP option is doing the same as \fB\-\-keep\-secondly N\fP (and it will keep the last N archives under the assumption that you do not create more than one backup archive in the same second). +.sp +When using \fB\-\-stats\fP, you will get some statistics about how much data was +deleted \- the "Deleted data" deduplicated size there is most interesting as +that is how much your repository will shrink. +Please note that the "All archives" stats refer to the state after pruning. .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. @@ -89,7 +94,7 @@ repository to prune do not change repository .TP .B \-\-force -force pruning of corrupted archives +force pruning of corrupted archives, use \fB\-\-force \-\-force\fP in case \fB\-\-force\fP does not work. 
.TP .B \-s\fP,\fB \-\-stats print statistics for the deleted archive @@ -97,7 +102,7 @@ print statistics for the deleted archive .B \-\-list output verbose list of archives it keeps/prunes .TP -.BI \-\-keep\-within \ WITHIN +.BI \-\-keep\-within \ INTERVAL keep all archives within this time interval .TP .B \-\-keep\-last\fP,\fB \-\-keep\-secondly @@ -124,13 +129,13 @@ number of yearly archives to keep .B \-\-save\-space work slower, but using less space .UNINDENT -.SS filters +.SS Archive filters .INDENT 0.0 .TP -.B \-P\fP,\fB \-\-prefix +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX only consider archive names starting with this prefix. .TP -.B \-a\fP,\fB \-\-glob\-archives +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. .UNINDENT .SH EXAMPLES @@ -145,8 +150,6 @@ prefix "foo" if you do not also want to match "foobar". .sp It is strongly recommended to always run \fBprune \-v \-\-list \-\-dry\-run ...\fP first so you will see what it would do without it actually doing anything. -.sp -There is also a visualized prune example in \fBdocs/misc/prune\-example.txt\fP\&. .INDENT 0.0 .INDENT 3.5 .sp @@ -171,6 +174,8 @@ $ borg prune \-v \-\-list \-\-keep\-within=10d \-\-keep\-weekly=4 \-\-keep\-mont .fi .UNINDENT .UNINDENT +.sp +There is also a visualized prune example in \fBdocs/misc/prune\-example.txt\fP\&. .SH SEE ALSO .sp \fIborg\-common(1)\fP diff --git a/docs/man/borg-recreate.1 b/docs/man/borg-recreate.1 index 9124d11017..6dcb95d7c4 100644 --- a/docs/man/borg-recreate.1 +++ b/docs/man/borg-recreate.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-RECREATE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-RECREATE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-recreate \- Re-create archives . @@ -32,16 +32,18 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. 
.SH SYNOPSIS .sp -borg [common options] recreate REPOSITORY_OR_ARCHIVE PATH +borg [common options] recreate [options] [REPOSITORY_OR_ARCHIVE] [PATH...] .SH DESCRIPTION .sp Recreate the contents of existing archives. .sp -This is an \fIexperimental\fP feature. Do \fInot\fP use this on your only backup. +recreate is a potentially dangerous function and might lead to data loss +(if used wrongly). BE VERY CAREFUL! .sp -\fB\-\-exclude\fP, \fB\-\-exclude\-from\fP, \fB\-\-exclude\-if\-present\fP, \fB\-\-keep\-exclude\-tags\fP, and PATH -have the exact same semantics as in "borg create". If PATHs are specified the -resulting archive will only contain files from these PATHs. +\fB\-\-exclude\fP, \fB\-\-exclude\-from\fP, \fB\-\-exclude\-if\-present\fP, \fB\-\-keep\-exclude\-tags\fP +and PATH have the exact same semantics as in "borg create", but they only check +for files in the archives and not in the local file system. If PATHs are specified, +the resulting archives will only contain files from these PATHs. .sp Note that all paths in an archive are relative, therefore absolute patterns/paths will \fInot\fP match (\fB\-\-exclude\fP, \fB\-\-exclude\-from\fP, PATHs). @@ -59,7 +61,7 @@ Borg 1.x archives. Depending on the PATHs and patterns given, recreate can be used to permanently delete files from archives. When in doubt, use \fB\-\-dry\-run \-\-verbose \-\-list\fP to see how patterns/PATHS are -interpreted. +interpreted. See \fIlist_item_flags\fP in \fBborg create\fP for details. .sp The archive being recreated is only removed after the operation completes. The archive that is built during the operation exists at the same time at @@ -71,6 +73,17 @@ When rechunking space usage can be substantial, expect at least the entire deduplicated size of the archives using the previous chunker params. When recompressing expect approx. (throughput / checkpoint\-interval) in space usage, assuming all chunks are recompressed. 
+.sp +If you recently ran borg check \-\-repair and it had to fix lost chunks with all\-zero +replacement chunks, please first run another backup for the same data and re\-run +borg check \-\-repair afterwards to heal any archives that had lost chunks which are +still generated from the input data. +.sp +Important: running borg recreate to re\-chunk will remove the chunks_healthy +metadata of all items with replacement chunks, so healing will not be possible +any more after re\-chunking (it is also unlikely it would ever work: due to the +change of chunking parameters, the missing chunk likely will never be seen again +even if you still have the data that produced it). .SH OPTIONS .sp See \fIborg\-common(1)\fP for common options of Borg commands. @@ -78,7 +91,7 @@ See \fIborg\-common(1)\fP for common options of Borg commands. .INDENT 0.0 .TP .B REPOSITORY_OR_ARCHIVE -repository/archive to recreate +repository or archive to recreate .TP .B PATH paths to recreate; patterns are supported @@ -90,7 +103,7 @@ paths to recreate; patterns are supported output verbose list of items (files, dirs, ...) 
.TP .BI \-\-filter \ STATUSCHARS -only display items with the given status characters +only display items with the given status characters (listed in borg create \-\-help) .TP .B \-n\fP,\fB \-\-dry\-run do not change anything @@ -101,26 +114,26 @@ print statistics at end .SS Exclusion options .INDENT 0.0 .TP -.BI \-e \ PATTERN\fP,\fB \ \-\-exclude \ PATTERN +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN exclude paths matching PATTERN .TP .BI \-\-exclude\-from \ EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line .TP +.BI \-\-pattern \ PATTERN +experimental: include/exclude paths matching PATTERN +.TP +.BI \-\-patterns\-from \ PATTERNFILE +experimental: read include/exclude patterns from PATTERNFILE, one per line +.TP .B \-\-exclude\-caches -exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.brynosaurus.com/cachedir/spec.html\fP) +exclude directories that contain a CACHEDIR.TAG file (\fI\%http://www.bford.info/cachedir/spec.html\fP) .TP .BI \-\-exclude\-if\-present \ NAME exclude directories that are tagged by containing a filesystem object with the given NAME .TP .B \-\-keep\-exclude\-tags\fP,\fB \-\-keep\-tag\-files if tag objects are specified with \fB\-\-exclude\-if\-present\fP, don\(aqt omit the tag objects themselves from the backup archive -.TP -.BI \-\-pattern \ PATTERN -experimental: include/exclude paths matching PATTERN -.TP -.BI \-\-patterns\-from \ PATTERNFILE -experimental: read include/exclude patterns from PATTERNFILE, one per line .UNINDENT .SS Archive options .INDENT 0.0 @@ -128,7 +141,7 @@ experimental: read include/exclude patterns from PATTERNFILE, one per line .BI \-\-target \ TARGET create a new archive with the name ARCHIVE, do not replace existing archive (only applies for a single archive) .TP -.BI \-c \ SECONDS\fP,\fB \ \-\-checkpoint\-interval \ SECONDS +.BI \-c \ SECONDS\fR,\fB \ \-\-checkpoint\-interval \ SECONDS write checkpoint every SECONDS seconds (Default: 1800) .TP .BI \-\-comment \ COMMENT @@ 
-137,11 +150,11 @@ add a comment text to the archive .BI \-\-timestamp \ TIMESTAMP manually specify the archive creation date/time (UTC, yyyy\-mm\-ddThh:mm:ss format). alternatively, give a reference file/directory. .TP -.BI \-C \ COMPRESSION\fP,\fB \ \-\-compression \ COMPRESSION +.BI \-C \ COMPRESSION\fR,\fB \ \-\-compression \ COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. .TP -.B \-\-recompress -recompress data chunks according to \fB\-\-compression\fP if \fIif\-different\fP\&. When \fIalways\fP, chunks that are already compressed that way are not skipped, but compressed again. Only the algorithm is considered for \fIif\-different\fP, not the compression level (if any). +.BI \-\-recompress \ MODE +recompress data chunks according to \fB\-\-compression\fP\&. MODE \fIif\-different\fP: recompress if current compression is with a different compression algorithm (the level is not considered). MODE \fIalways\fP: recompress even if current compression is with the same compression algorithm (use this to change the compression level). MODE \fInever\fP (default): do not recompress. .TP .BI \-\-chunker\-params \ PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or \fIdefault\fP to use the current defaults. default: 19,23,21,4095 @@ -152,19 +165,20 @@ specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HA .sp .nf .ft C -# Make old (Attic / Borg 0.xx) archives deduplicate with Borg 1.x archives -# Archives created with Borg 1.1+ and the default chunker params are skipped (archive ID stays the same) +# Make old (Attic / Borg 0.xx) archives deduplicate with Borg 1.x archives. +# Archives created with Borg 1.1+ and the default chunker params are skipped +# (archive ID stays the same). 
$ borg recreate /mnt/backup \-\-chunker\-params default \-\-progress # Create a backup with little but fast compression $ borg create /mnt/backup::archive /some/files \-\-compression lz4 -# Then compress it \- this might take longer, but the backup has already completed, so no inconsistencies -# from a long\-running backup job. +# Then compress it \- this might take longer, but the backup has already completed, +# so no inconsistencies from a long\-running backup job. $ borg recreate /mnt/backup::archive \-\-recompress \-\-compression zlib,9 -# Remove unwanted files from all archives in a repository -$ borg recreate /mnt/backup \-e /home/icke/Pictures/drunk_photos - +# Remove unwanted files from all archives in a repository. +# Note the relative path for the \-\-exclude option \- archives only contain relative paths. +$ borg recreate /mnt/backup \-\-exclude home/icke/Pictures/drunk_photos # Change archive comment $ borg create \-\-comment "This is a comment" /mnt/backup::archivename ~ diff --git a/docs/man/borg-rename.1 b/docs/man/borg-rename.1 index 17e65f4395..0be2d4f15d 100644 --- a/docs/man/borg-rename.1 +++ b/docs/man/borg-rename.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-RENAME 1 "2017-06-18" "" "borg backup tool" +.TH BORG-RENAME 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-rename \- Rename an existing archive . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] rename ARCHIVE NEWNAME +borg [common options] rename [options] ARCHIVE NEWNAME .SH DESCRIPTION .sp This command renames an archive in the repository. diff --git a/docs/man/borg-serve.1 b/docs/man/borg-serve.1 index 5052c724de..67429bf1cb 100644 --- a/docs/man/borg-serve.1 +++ b/docs/man/borg-serve.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . 
-.TH BORG-SERVE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-SERVE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-serve \- Start in server mode. This command is usually not used manually. . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] serve +borg [common options] serve [options] .SH DESCRIPTION .sp This command starts a repository server process. This command is usually not used manually. @@ -46,12 +46,12 @@ See \fIborg\-common(1)\fP for common options of Borg commands. restrict repository access to PATH. Can be specified multiple times to allow the client access to several directories. Access to all sub\-directories is granted implicitly; PATH doesn\(aqt need to directly point to a repository. .TP .BI \-\-restrict\-to\-repository \ PATH -restrict repository access. Only the repository located at PATH (no sub\-directories are considered) is accessible. Can be specified multiple times to allow the client access to several repositories. Unlike \-\-restrict\-to\-path sub\-directories are not accessible; PATH needs to directly point at a repository location. PATH may be an empty directory or the last element of PATH may not exist, in which case the client may initialize a repository there. +restrict repository access. Only the repository located at PATH (no sub\-directories are considered) is accessible. Can be specified multiple times to allow the client access to several repositories. Unlike \fB\-\-restrict\-to\-path\fP sub\-directories are not accessible; PATH needs to directly point at a repository location. PATH may be an empty directory or the last element of PATH may not exist, in which case the client may initialize a repository there. .TP .B \-\-append\-only only allow appending to repository segment files .TP -.B \-\-storage\-quota +.BI \-\-storage\-quota \ QUOTA Override storage quota of the repository (e.g. 5G, 1.5T). 
When a new repository is initialized, sets the storage quota on the new repository as well. Default: no quota. .UNINDENT .SH EXAMPLES @@ -78,7 +78,7 @@ locations like \fB/etc/environment\fP or in the forced command itself (example b # Use key options to disable unneeded and potentially dangerous SSH functionality. # This will help to secure an automated remote backup system. $ cat ~/.ssh/authorized_keys -command="borg serve \-\-restrict\-to\-path /path/to/repo",no\-pty,no\-agent\-forwarding,no\-port\-forwarding,no\-X11\-forwarding,no\-user\-rc ssh\-rsa AAAAB3[...] +command="borg serve \-\-restrict\-to\-path /path/to/repo",restrict ssh\-rsa AAAAB3[...] # Set a BORG_XXX environment variable on the "borg serve" side $ cat ~/.ssh/authorized_keys @@ -87,6 +87,63 @@ command="export BORG_XXX=value; borg serve [...]",restrict ssh\-rsa [...] .fi .UNINDENT .UNINDENT +.sp +\fBNOTE:\fP +.INDENT 0.0 +.INDENT 3.5 +The examples above use the \fBrestrict\fP directive. This does automatically +block potential dangerous ssh features, even when they are added in a future +update. Thus, this option should be preferred. +.sp +If you\(aqre using openssh\-server < 7.2, however, you have to explicitly specify +the ssh features to restrict and cannot simply use the restrict option as it +has been introduced in v7.2. We recommend to use +\fBno\-port\-forwarding,no\-X11\-forwarding,no\-pty,no\-agent\-forwarding,no\-user\-rc\fP +in this case. +.UNINDENT +.UNINDENT +.SS SSH Configuration +.sp +\fBborg serve\fP\(aqs pipes (\fBstdin\fP/\fBstdout\fP/\fBstderr\fP) are connected to the \fBsshd\fP process on the server side. In the event that the SSH connection between \fBborg serve\fP and the client is disconnected or stuck abnormally (for example, due to a network outage), it can take a long time for \fBsshd\fP to notice the client is disconnected. In the meantime, \fBsshd\fP continues running, and as a result so does the \fBborg serve\fP process holding the lock on the repository. 
This can cause subsequent \fBborg\fP operations on the remote repository to fail with the error: \fBFailed to create/acquire the lock\fP\&. +.sp +In order to avoid this, it is recommended to perform the following additional SSH configuration: +.sp +Either in the client side\(aqs \fB~/.ssh/config\fP file, or in the client\(aqs \fB/etc/ssh/ssh_config\fP file: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +Host backupserver + ServerAliveInterval 10 + ServerAliveCountMax 30 +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Replacing \fBbackupserver\fP with the hostname, FQDN or IP address of the borg server. +.sp +This will cause the client to send a keepalive to the server every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the ssh client process will be terminated, causing the borg process to terminate gracefully. +.sp +On the server side\(aqs \fBsshd\fP configuration file (typically \fB/etc/ssh/sshd_config\fP): +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +ClientAliveInterval 10 +ClientAliveCountMax 30 +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +This will cause the server to send a keep alive to the client every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the server\(aqs sshd process will be terminated, causing the \fBborg serve\fP process to terminate gracefully and release the lock on the repository. +.sp +If you then run borg commands with \fB\-\-lock\-wait 600\fP, this gives sufficient time for the borg serve processes to terminate after the SSH connection is torn down after the 300 second wait for the keepalives to fail. +.sp +You may, of course, modify the timeout values demonstrated above to values that suit your environment and use case. 
.SH SEE ALSO .sp \fIborg\-common(1)\fP diff --git a/docs/man/borg-umount.1 b/docs/man/borg-umount.1 index 21b3c2eec7..5aab0c51e2 100644 --- a/docs/man/borg-umount.1 +++ b/docs/man/borg-umount.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-UMOUNT 1 "2017-06-18" "" "borg backup tool" +.TH BORG-UMOUNT 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-umount \- un-mount the FUSE filesystem . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] umount MOUNTPOINT +borg [common options] umount [options] MOUNTPOINT .SH DESCRIPTION .sp This command un\-mounts a FUSE filesystem that was mounted with \fBborg mount\fP\&. @@ -49,31 +49,44 @@ See \fIborg\-common(1)\fP for common options of Borg commands. mountpoint of the filesystem to umount .UNINDENT .SH EXAMPLES -.SS borg mount .INDENT 0.0 .INDENT 3.5 .sp .nf .ft C +# Mounting the repository shows all archives. +# Archives are loaded lazily, expect some delay when navigating to an archive +# for the first time. +$ borg mount /path/to/repo /tmp/mymountpoint +$ ls /tmp/mymountpoint +root\-2016\-02\-14 root\-2016\-02\-15 +$ borg umount /tmp/mymountpoint + +# Mounting a specific archive is possible as well. $ borg mount /path/to/repo::root\-2016\-02\-15 /tmp/mymountpoint $ ls /tmp/mymountpoint -bin boot etc home lib lib64 lost+found media mnt opt root sbin srv tmp usr var +bin boot etc home lib lib64 lost+found media mnt opt +root sbin srv tmp usr var $ borg umount /tmp/mymountpoint -.ft P -.fi -.UNINDENT -.UNINDENT -.INDENT 0.0 -.INDENT 3.5 -.sp -.nf -.ft C + +# The experimental "versions view" merges all archives in the repository +# and provides a versioned view on files. 
$ borg mount \-o versions /path/to/repo /tmp/mymountpoint $ ls \-l /tmp/mymountpoint/home/user/doc.txt/ total 24 -\-rw\-rw\-r\-\- 1 user group 12357 Aug 26 21:19 doc.txt.cda00bc9 -\-rw\-rw\-r\-\- 1 user group 12204 Aug 26 21:04 doc.txt.fa760f28 -$ fusermount \-u /tmp/mymountpoint +\-rw\-rw\-r\-\- 1 user group 12357 Aug 26 21:19 doc.cda00bc9.txt +\-rw\-rw\-r\-\- 1 user group 12204 Aug 26 21:04 doc.fa760f28.txt +$ borg umount /tmp/mymountpoint + +# Archive filters are supported. +# These are especially handy for the "versions view", +# which does not support lazy processing of archives. +$ borg mount \-o versions \-\-glob\-archives \(aq*\-my\-home\(aq \-\-last 10 /path/to/repo /tmp/mymountpoint + +# Exclusion options are supported. +# These can speed up mounting and lower memory needs significantly. +$ borg mount /path/to/repo /tmp/mymountpoint only/that/path +$ borg mount \-\-exclude \(aq...\(aq /path/to/repo /tmp/mymountpoint .ft P .fi .UNINDENT diff --git a/docs/man/borg-upgrade.1 b/docs/man/borg-upgrade.1 index 0d4cef89c9..37c8bbf958 100644 --- a/docs/man/borg-upgrade.1 +++ b/docs/man/borg-upgrade.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-UPGRADE 1 "2017-06-18" "" "borg backup tool" +.TH BORG-UPGRADE 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-upgrade \- upgrade a repository from a previous version . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] upgrade REPOSITORY +borg [common options] upgrade [options] [REPOSITORY] .SH DESCRIPTION .sp Upgrade an existing, local Borg repository. 
@@ -100,17 +100,22 @@ from scratch, use \fIborg delete\fP over the copied repository to make sure the cache files are also removed: .INDENT 0.0 .INDENT 3.5 +.sp +.nf +.ft C borg delete borg +.ft P +.fi .UNINDENT .UNINDENT .sp -Unless \fB\-\-inplace\fP is specified, the upgrade process first -creates a backup copy of the repository, in -REPOSITORY.upgrade\-DATETIME, using hardlinks. This takes -longer than in place upgrades, but is much safer and gives -progress information (as opposed to \fBcp \-al\fP). Once you are -satisfied with the conversion, you can safely destroy the -backup copy. +Unless \fB\-\-inplace\fP is specified, the upgrade process first creates a backup +copy of the repository, in REPOSITORY.before\-upgrade\-DATETIME, using hardlinks. +This requires that the repository and its parent directory reside on the same +filesystem so the hardlink copy can work. +This takes longer than in place upgrades, but is much safer and gives +progress information (as opposed to \fBcp \-al\fP). Once you are satisfied +with the conversion, you can safely destroy the backup copy. .sp WARNING: Running the upgrade in place will make the current copy unusable with older version, with no way of going back @@ -133,17 +138,16 @@ path to the repository to be upgraded do not change repository .TP .B \-\-inplace -rewrite repository in place, with no chance of going back to older -versions of the repository. +rewrite repository in place, with no chance of going back to older versions of the repository. .TP .B \-\-force Force upgrade .TP .B \-\-tam -Enable manifest authentication (in key and cache) (Borg 1.0.9 and later) +Enable manifest authentication (in key and cache) (Borg 1.0.9 and later). .TP .B \-\-disable\-tam -Disable manifest authentication (in key and cache) +Disable manifest authentication (in key and cache).
.UNINDENT .SH EXAMPLES .INDENT 0.0 @@ -153,7 +157,7 @@ Disable manifest authentication (in key and cache) .ft C # Upgrade the borg repository to the most recent version. $ borg upgrade \-v /path/to/repo -making a hardlink copy in /path/to/repo.upgrade\-2016\-02\-15\-20:51:55 +making a hardlink copy in /path/to/repo.before\-upgrade\-2016\-02\-15\-20:51:55 opening attic repository with borg and converting no key file found for repository converting repo index /path/to/repo/index.0 diff --git a/docs/man/borg-with-lock.1 b/docs/man/borg-with-lock.1 index c71bd28a24..a753f6f036 100644 --- a/docs/man/borg-with-lock.1 +++ b/docs/man/borg-with-lock.1 @@ -1,6 +1,6 @@ .\" Man page generated from reStructuredText. . -.TH BORG-WITH-LOCK 1 "2017-06-18" "" "borg backup tool" +.TH BORG-WITH-LOCK 1 "2021-03-22" "" "borg backup tool" .SH NAME borg-with-lock \- run a user specified command with the repository lock held . @@ -32,7 +32,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .. .SH SYNOPSIS .sp -borg [common options] with\-lock REPOSITORY COMMAND ARGS +borg [common options] with\-lock [options] REPOSITORY COMMAND [ARGS...] .SH DESCRIPTION .sp This command runs a user\-specified command while the repository lock is held. @@ -41,11 +41,15 @@ It will first try to acquire the lock (make sure that no other operation is running in the repo), then execute the given command as a subprocess and wait for its termination, release the lock and return the user command\(aqs return code as borg\(aqs return code. +.sp +\fBNOTE:\fP .INDENT 0.0 -.TP -.B Note: if you copy a repository with the lock held, the lock will be present in -the copy, obviously. Thus, before using borg on the copy, you need to -use "borg break\-lock" on it. +.INDENT 3.5 +If you copy a repository with the lock held, the lock will be present in +the copy. 
Thus, before using borg on the copy from a different host, +you need to use "borg break\-lock" on the copied repository, because +Borg is cautious and does not automatically remove stale locks made by a different host. +.UNINDENT .UNINDENT .SH OPTIONS .sp diff --git a/docs/man/borg.1 b/docs/man/borg.1 index 99493444ea..81e220f280 100644 --- a/docs/man/borg.1 +++ b/docs/man/borg.1 @@ -48,8 +48,8 @@ fully trusted targets. .sp Borg stores a set of files in an \fIarchive\fP\&. A \fIrepository\fP is a collection of \fIarchives\fP\&. The format of repositories is Borg\-specific. Borg does not -distinguish archives from each other in a any way other than their name, -it does not matter when or where archives where created (eg. different hosts). +distinguish archives from each other in any way other than their name, +it does not matter when or where archives were created (e.g. different hosts). .SH EXAMPLES .SS A step\-by\-step example .INDENT 0.0 @@ -186,6 +186,26 @@ get other informational messages. .UNINDENT .UNINDENT .SH NOTES +.SS Positional Arguments and Options: Order matters +.sp +Borg only supports taking options (\fB\-s\fP and \fB\-\-progress\fP in the example) +to the left or right of all positional arguments (\fBrepo::archive\fP and \fBpath\fP +in the example), but not in between them: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +borg create \-s \-\-progress repo::archive path # good and preferred +borg create repo::archive path \-s \-\-progress # also works +borg create \-s repo::archive path \-\-progress # works, but ugly +borg create repo::archive \-s \-\-progress path # BAD +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +This is due to a problem in the argparse module: \fI\%https://bugs.python.org/issue15112\fP .SS Repository URLs .sp \fBLocal filesystem\fP (or locally mounted network filesystem): @@ -254,7 +274,26 @@ name). 
.sp If you have set BORG_REPO (see above) and an archive location is needed, use \fB::archive_name\fP \- the repo URL part is then read from BORG_REPO. -.SS Type of log output +.SS Logging +.sp +Borg writes all log output to stderr by default. But please note that something +showing up on stderr does \fInot\fP indicate an error condition just because it is +on stderr. Please check the log levels of the messages and the return code of +borg for determining error, warning or success conditions. +.sp +If you want to capture the log output to a file, just redirect it: +.INDENT 0.0 +.INDENT 3.5 +.sp +.nf +.ft C +borg create repo::archive myfiles 2>> logfile +.ft P +.fi +.UNINDENT +.UNINDENT +.sp +Custom logging configurations can be implemented via BORG_LOGGING_CONF. .sp The log level of the builtin logging configuration defaults to WARNING. This is because we want Borg to be mostly silent and only output @@ -354,10 +393,19 @@ See also BORG_NEW_PASSPHRASE. When set, use the standard output of the command (trailing newlines are stripped) to answer the passphrase question for encrypted repositories. It is used when a passphrase is needed to access an encrypted repo as well as when a new -passphrase should be initially set when initializing an encrypted repo. +passphrase should be initially set when initializing an encrypted repo. Note that the command +is executed without a shell. So variables, like \fB$HOME\fP will work, but \fB~\fP won\(aqt. If BORG_PASSPHRASE is also set, it takes precedence. See also BORG_NEW_PASSPHRASE. .TP +.B BORG_PASSPHRASE_FD +When set, specifies a file descriptor to read a passphrase +from. Programs starting borg may choose to open an anonymous pipe +and use it to pass a passphrase. This is safer than passing via +BORG_PASSPHRASE, because on some systems (e.g. Linux) environment +can be examined by other processes. +If BORG_PASSPHRASE or BORG_PASSCOMMAND are also set, they take precedence. 
+.TP .B BORG_NEW_PASSPHRASE When set, use the value to answer the passphrase question when a \fBnew\fP passphrase is asked for. This variable is checked first. If it is not set, BORG_PASSPHRASE and BORG_PASSCOMMAND will also @@ -372,24 +420,56 @@ Borg assumes that it can derive a unique hostname / identity (see \fBborg debug If this is not the case or you do not want Borg to automatically remove stale locks, set this to \fIno\fP\&. .TP +.B BORG_HOST_ID +Borg usually computes a host id from the FQDN plus the results of \fBuuid.getnode()\fP (which usually returns +a unique id based on the MAC address of the network interface). Except if that MAC happens to be all\-zero \- in +that case it returns a random value, which is not what we want (because it kills automatic stale lock removal). +So, if you have an all\-zero MAC address or other reasons to better externally control the host id, just set this +environment variable to a unique value. If all your FQDNs are unique, you can just use the FQDN. If not, +use \fI\%fqdn@uniqueid\fP\&. +.TP .B BORG_LOGGING_CONF When set, use the given filename as \fI\%INI\fP\-style logging configuration. +A basic example conf can be found at \fBdocs/misc/logging.conf\fP\&. .TP .B BORG_RSH When set, use this command instead of \fBssh\fP\&. This can be used to specify ssh options, such as -a custom identity file \fBssh \-i /path/to/private/key\fP\&. See \fBman ssh\fP for other options. +a custom identity file \fBssh \-i /path/to/private/key\fP\&. See \fBman ssh\fP for other options. Using +the \fB\-\-rsh CMD\fP commandline option overrides the environment variable. .TP .B BORG_REMOTE_PATH When set, use the given path as borg executable on the remote (defaults to "borg" if unset). Using \fB\-\-remote\-path PATH\fP commandline option overrides the environment variable. .TP +.B BORG_FILES_CACHE_SUFFIX +When set to a value at least one character long, instructs borg to use a specifically named +(based on the suffix) alternative files cache.
This can be used to avoid loading and saving +cache entries for backup sources other than the current sources. +.TP .B BORG_FILES_CACHE_TTL When set to a numeric value, this determines the maximum "time to live" for the files cache entries (default: 20). The files cache is used to quickly determine whether a file is unchanged. The FAQ explains this more detailed in: \fIalways_chunking\fP .TP -.B TMPDIR -where temporary files are stored (might need a lot of temporary space for some operations) +.B BORG_SHOW_SYSINFO +When set to no (default: yes), system information (like OS, Python version, ...) in +exceptions is not shown. +Please only use for good reasons as it makes issues harder to analyze. +.TP +.B BORG_WORKAROUNDS +A list of comma separated strings that trigger workarounds in borg, +e.g. to work around bugs in other software. +.sp +Currently known strings are: +.INDENT 7.0 +.TP +.B basesyncfile +Use the more simple BaseSyncFile code to avoid issues with sync_file_range. +You might need this to run borg on WSL (Windows Subsystem for Linux) or +in systemd.nspawn containers on some architectures (e.g. ARM). +Using this does not affect data safety, but might result in a more bursty +write to disk behaviour (not continuously streaming to disk). +.UNINDENT .UNINDENT .TP .B Some automatic "answerers" (if set, they automatically answer confirmation questions): @@ -402,36 +482,55 @@ For "Warning: Attempting to access a previously unknown unencrypted repository" For "Warning: The repository at location ... was previously located at ..." .TP .B BORG_CHECK_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) -For "Warning: \(aqcheck \-\-repair\(aq is an experimental feature that might result in data loss." +For "This is a potentially dangerous function..." 
(check \-\-repair) .TP .B BORG_DELETE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) For "You requested to completely DELETE the repository \fIincluding\fP all archives it contains:" -.TP -.B BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) -For "recreate is an experimental feature." .UNINDENT .sp Note: answers are case sensitive. setting an invalid answer value might either give the default answer or ask you interactively, depending on whether retries are allowed (they by default are allowed). So please test your scripts interactively before making them a non\-interactive script. +.UNINDENT +.INDENT 0.0 .TP .B Directories and files: .INDENT 7.0 .TP +.B BORG_BASE_DIR +Defaults to \fB$HOME\fP or \fB~$USER\fP or \fB~\fP (in that order). +If you want to move all borg\-specific folders to a custom path at once, all you need to do is +to modify \fBBORG_BASE_DIR\fP: the other paths for cache, config etc. will adapt accordingly +(assuming you didn\(aqt set them to a different custom value). +.TP +.B BORG_CACHE_DIR +Defaults to \fB$BORG_BASE_DIR/.cache/borg\fP\&. If \fBBORG_BASE_DIR\fP is not explicitly set while +\fI\%XDG env var\fP \fBXDG_CACHE_HOME\fP is set, then \fB$XDG_CACHE_HOME/borg\fP is being used instead. +This directory contains the local cache and might need a lot +of space for dealing with big repositories. Make sure you\(aqre aware of the associated +security aspects of the cache location: \fIcache_security\fP +.TP +.B BORG_CONFIG_DIR +Defaults to \fB$BORG_BASE_DIR/.config/borg\fP\&. If \fBBORG_BASE_DIR\fP is not explicitly set while +\fI\%XDG env var\fP \fBXDG_CONFIG_HOME\fP is set, then \fB$XDG_CONFIG_HOME/borg\fP is being used instead. +This directory contains all borg configuration directories, see the FAQ +for a security advisory about the data in this directory: \fIhome_config_borg\fP +.TP +.B BORG_SECURITY_DIR +Defaults to \fB$BORG_CONFIG_DIR/security\fP\&. 
+This directory contains information borg uses to track its usage of NONCES ("numbers used +once" \- usually in encryption context) and other security relevant data. +.TP .B BORG_KEYS_DIR -Default to \(aq~/.config/borg/keys\(aq. This directory contains keys for encrypted repositories. +Defaults to \fB$BORG_CONFIG_DIR/keys\fP\&. +This directory contains keys for encrypted repositories. .TP .B BORG_KEY_FILE When set, use the given filename as repository key file. .TP -.B BORG_SECURITY_DIR -Default to \(aq~/.config/borg/security\(aq. This directory contains information borg uses to -track its usage of NONCES ("numbers used once" \- usually in encryption context) and other -security relevant data. -.TP -.B BORG_CACHE_DIR -Default to \(aq~/.cache/borg\(aq. This directory contains the local cache and might need a lot -of space for dealing with big repositories). +.B TMPDIR +This is where temporary files are stored (might need a lot of temporary space for some +operations), see \fI\%tempfile\fP for details. .UNINDENT .TP .B Building: @@ -440,22 +539,27 @@ of space for dealing with big repositories). .B BORG_OPENSSL_PREFIX Adds given OpenSSL header file directory to the default locations (setup.py). .TP -.B BORG_LZ4_PREFIX -Adds given LZ4 header file directory to the default locations (setup.py). +.B BORG_LIBLZ4_PREFIX +Adds given prefix directory to the default locations. If a \(aqinclude/lz4.h\(aq is found Borg +will be linked against the system liblz4 instead of a bundled implementation. (setup.py) .TP .B BORG_LIBB2_PREFIX Adds given prefix directory to the default locations. If a \(aqinclude/blake2.h\(aq is found Borg will be linked against the system libb2 instead of a bundled implementation. (setup.py) +.TP +.B BORG_LIBZSTD_PREFIX +Adds given prefix directory to the default locations. If a \(aqinclude/zstd.h\(aq is found Borg +will be linked against the system libzstd instead of a bundled implementation. 
(setup.py) .UNINDENT .UNINDENT .sp Please note: .INDENT 0.0 .IP \(bu 2 -be very careful when using the "yes" sayers, the warnings with prompt exist for your / your data\(aqs security/safety +Be very careful when using the "yes" sayers, the warnings with prompt exist for your / your data\(aqs security/safety. .IP \(bu 2 -also be very careful when putting your passphrase into a script, make sure it has appropriate file permissions -(e.g. mode 600, root:root). +Also be very careful when putting your passphrase into a script, make sure it has appropriate file permissions (e.g. +mode 600, root:root). .UNINDENT .SS File systems .sp @@ -472,6 +576,23 @@ know a list of affected hardware. If you are suspicious whether your Borg repository is still consistent and readable after one of the failures mentioned above occurred, run \fBborg check \-\-verify\-data\fP to make sure it is consistent. +Requirements for Borg repository file systems.INDENT 0.0 +.IP \(bu 2 +Long file names +.IP \(bu 2 +At least three directory levels with short names +.IP \(bu 2 +Typically, file sizes up to a few hundred MB. +Large repositories may require large files (>2 GB). +.IP \(bu 2 +Up to 1000 files per directory (10000 for repositories initialized with Borg 1.0) +.IP \(bu 2 +mkdir(2) should be atomic, since it is used for locking +.IP \(bu 2 +Hardlinks are needed for \fIborg_upgrade\fP (if \fB\-\-inplace\fP option is not used). +Also hardlinks are used for more safe and secure file updating (e.g. of the repo +config file), but the code tries to work also if hardlinks are not supported. +.UNINDENT .SS Units .sp To display quantities, Borg takes care of respecting the @@ -502,12 +623,21 @@ server to get the approximate resource usage. .INDENT 0.0 .TP .B CPU client: -borg create: does chunking, hashing, compression, crypto (high CPU usage) -chunks cache sync: quite heavy on CPU, doing lots of hashtable operations. 
-borg extract: crypto, decompression (medium to high CPU usage) -borg check: similar to extract, but depends on options given. -borg prune / borg delete archive: low to medium CPU usage -borg delete repo: done on the server +.INDENT 7.0 +.IP \(bu 2 +\fBborg create:\fP does chunking, hashing, compression, crypto (high CPU usage) +.IP \(bu 2 +\fBchunks cache sync:\fP quite heavy on CPU, doing lots of hashtable operations. +.IP \(bu 2 +\fBborg extract:\fP crypto, decompression (medium to high CPU usage) +.IP \(bu 2 +\fBborg check:\fP similar to extract, but depends on options given. +.IP \(bu 2 +\fBborg prune / borg delete archive:\fP low to medium CPU usage +.IP \(bu 2 +\fBborg delete repo:\fP done on the server +.UNINDENT +.sp It won\(aqt go beyond 100% of 1 core as the code is currently single\-threaded. Especially higher zlib and lzma compression levels use significant amounts of CPU cycles. Crypto might be cheap on the CPU (if hardware accelerated) or @@ -559,7 +689,16 @@ the size of all deduplicated, small chunks in the repository. Big chunks won\(aqt be locally cached. .TP .B Temporary files (server): -None. +A non\-trivial amount of data will be stored on the remote temp directory +for each client that connects to it. For some remotes, this can fill the +default temporary directory at /tmp. This can be remediated by ensuring the +$TMPDIR, $TEMP, or $TMP environment variable is properly set for the sshd +process. +For some OSes, this can be done just by setting the correct value in the +\&.bashrc (or equivalent login config file for other shells), however in +other cases it may be necessary to first enable \fBPermitUserEnvironment yes\fP +in your \fBsshd_config\fP file, then add \fBenvironment="TMPDIR=/my/big/tmpdir"\fP +at the start of the public key to be used in the \fBauthorized_keys\fP file.
.TP .B Cache files (client only): Contains the chunks index and files index (plus a collection of single\- @@ -574,6 +713,155 @@ operations used for transaction support also go over the connection. If you backup multiple sources to one target repository, additional traffic happens for cache resynchronization. .UNINDENT +.SS Support for file metadata +.sp +Besides regular file and directory structures, Borg can preserve +.INDENT 0.0 +.IP \(bu 2 +symlinks (stored as symlink, the symlink is not followed) +.IP \(bu 2 +special files: +.INDENT 2.0 +.IP \(bu 2 +character and block device files (restored via mknod) +.IP \(bu 2 +FIFOs ("named pipes") +.IP \(bu 2 +special file \fIcontents\fP can be backed up in \fB\-\-read\-special\fP mode. +By default the metadata to create them with mknod(2), mkfifo(2) etc. is stored. +.UNINDENT +.IP \(bu 2 +hardlinked regular files, devices, FIFOs (considering all items in the same archive) +.IP \(bu 2 +timestamps in nanosecond precision: mtime, atime, ctime +.IP \(bu 2 +other timestamps: birthtime (on platforms supporting it) +.IP \(bu 2 +permissions: +.INDENT 2.0 +.IP \(bu 2 +IDs of owning user and owning group +.IP \(bu 2 +names of owning user and owning group (if the IDs can be resolved) +.IP \(bu 2 +Unix Mode/Permissions (u/g/o permissions, suid, sgid, sticky) +.UNINDENT +.UNINDENT +.sp +On some platforms additional features are supported: +.\" Yes/No's are grouped by reason/mechanism/reference. +. +.TS +center; +|l|l|l|l|. 
+_ +T{ +Platform +T} T{ +ACLs +[5] +T} T{ +xattr +[6] +T} T{ +Flags +[7] +T} +_ +T{ +Linux +T} T{ +Yes +T} T{ +Yes +T} T{ +Yes [1] +T} +_ +T{ +Mac OS X +T} T{ +Yes +T} T{ +Yes +T} T{ +Yes (all) +T} +_ +T{ +FreeBSD +T} T{ +Yes +T} T{ +Yes +T} T{ +Yes (all) +T} +_ +T{ +OpenBSD +T} T{ +n/a +T} T{ +n/a +T} T{ +Yes (all) +T} +_ +T{ +NetBSD +T} T{ +n/a +T} T{ +No [2] +T} T{ +Yes (all) +T} +_ +T{ +Solaris and derivatives +T} T{ +No [3] +T} T{ +No [3] +T} T{ +n/a +T} +_ +T{ +Windows (cygwin) +T} T{ +No [4] +T} T{ +No +T} T{ +No +T} +_ +.TE +.sp +Other Unix\-like operating systems may work as well, but have not been tested at all. +.sp +Note that most of the platform\-dependent features also depend on the file system. +For example, ntfs\-3g on Linux isn\(aqt able to convey NTFS ACLs. +.IP [1] 5 +Only "nodump", "immutable", "compressed" and "append" are supported. +Feature request #618 for more flags. +.IP [2] 5 +Feature request #1332 +.IP [3] 5 +Feature request #1337 +.IP [4] 5 +Cygwin tries to map NTFS ACLs to permissions with varying degrees of success. +.IP [5] 5 +The native access control list mechanism of the OS. This normally limits access to +non\-native ACLs. For example, NTFS ACLs aren\(aqt completely accessible on Linux with ntfs\-3g. +.IP [6] 5 +extended attributes; key\-value pairs attached to a file, mainly used by the OS. +This includes resource forks on Mac OS X. +.IP [7] 5 +aka \fIBSD flags\fP\&. The Linux set of flags [1] is portable across platforms. +The BSDs define additional flags. .SH SEE ALSO .sp \fIborg\-common(1)\fP for common command line options @@ -587,7 +875,7 @@ happens for cache resynchronization. 
\fIborg\-compression(1)\fP, \fIborg\-patterns(1)\fP, \fIborg\-placeholders(1)\fP .INDENT 0.0 .IP \(bu 2 -Main web site \fI\%https://borgbackup.readthedocs.org/\fP +Main web site \fI\%https://www.borgbackup.org/\fP .IP \(bu 2 Releases \fI\%https://github.com/borgbackup/borg/releases\fP .IP \(bu 2 diff --git a/docs/man/borgfs.1 b/docs/man/borgfs.1 new file mode 100644 index 0000000000..1af510feb8 --- /dev/null +++ b/docs/man/borgfs.1 @@ -0,0 +1,147 @@ +.\" Man page generated from reStructuredText. +. +.TH BORGFS 1 "2021-03-22" "" "borg backup tool" +.SH NAME +borgfs \- Mount archive or an entire repository as a FUSE filesystem +. +.nr rst2man-indent-level 0 +. +.de1 rstReportMargin +\\$1 \\n[an-margin] +level \\n[rst2man-indent-level] +level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] +- +\\n[rst2man-indent0] +\\n[rst2man-indent1] +\\n[rst2man-indent2] +.. +.de1 INDENT +.\" .rstReportMargin pre: +. RS \\$1 +. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] +. nr rst2man-indent-level +1 +.\" .rstReportMargin post: +.. +.de UNINDENT +. RE +.\" indent \\n[an-margin] +.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] +.nr rst2man-indent-level -1 +.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] +.in \\n[rst2man-indent\\n[rst2man-indent-level]]u +.. +.SH SYNOPSIS +.sp +borgfs [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] +.SH DESCRIPTION +.sp +This command mounts an archive as a FUSE filesystem. This can be useful for +browsing an archive or restoring individual files. Unless the \fB\-\-foreground\fP +option is given the command will run in the background until the filesystem +is \fBumounted\fP\&. +.sp +The command \fBborgfs\fP provides a wrapper for \fBborg mount\fP\&. 
This can also be +used in fstab entries: +\fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0\fP +.sp +To allow a regular user to use fstab entries, add the \fBuser\fP option: +\fB/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0\fP +.sp +For FUSE configuration and mount options, see the mount.fuse(8) manual page. +.sp +Additional mount options supported by borg: +.INDENT 0.0 +.IP \(bu 2 +versions: when used with a repository mount, this gives a merged, versioned +view of the files in the archives. EXPERIMENTAL, layout may change in future. +.IP \(bu 2 +allow_damaged_files: by default damaged files (where missing chunks were +replaced with runs of zeros by borg check \fB\-\-repair\fP) are not readable and +return EIO (I/O error). Set this option to read such files. +.IP \(bu 2 +ignore_permissions: for security reasons the "default_permissions" mount +option is internally enforced by borg. "ignore_permissions" can be given to +not enforce "default_permissions". +.UNINDENT +.sp +The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users +to tweak the performance. It sets the number of cached data chunks; additional +memory usage can be up to ~8 MiB times this number. The default is the number +of CPU cores. +.sp +When the daemonized process receives a signal or crashes, it does not unmount. +Unmounting in these cases could cause an active rsync or similar process +to unintentionally delete data. +.sp +When running in the foreground ^C/SIGINT unmounts cleanly, but other +signals or crashes do not. +.SH OPTIONS +.sp +See \fIborg\-common(1)\fP for common options of Borg commands. 
+.SS arguments +.INDENT 0.0 +.TP +.B REPOSITORY_OR_ARCHIVE +repository or archive to mount +.TP +.B MOUNTPOINT +where to mount filesystem +.TP +.B PATH +paths to extract; patterns are supported +.UNINDENT +.SS optional arguments +.INDENT 0.0 +.TP +.B \-V\fP,\fB \-\-version +show version number and exit +.TP +.B \-f\fP,\fB \-\-foreground +stay in foreground, do not daemonize +.TP +.B \-o +Extra mount options +.UNINDENT +.SS Archive filters +.INDENT 0.0 +.TP +.BI \-P \ PREFIX\fR,\fB \ \-\-prefix \ PREFIX +only consider archive names starting with this prefix. +.TP +.BI \-a \ GLOB\fR,\fB \ \-\-glob\-archives \ GLOB +only consider archive names matching the glob. sh: rules apply, see "borg help patterns". \fB\-\-prefix\fP and \fB\-\-glob\-archives\fP are mutually exclusive. +.TP +.BI \-\-sort\-by \ KEYS +Comma\-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp +.TP +.BI \-\-first \ N +consider first N archives after other filters were applied +.TP +.BI \-\-last \ N +consider last N archives after other filters were applied +.UNINDENT +.SS Exclusion options +.INDENT 0.0 +.TP +.BI \-e \ PATTERN\fR,\fB \ \-\-exclude \ PATTERN +exclude paths matching PATTERN +.TP +.BI \-\-exclude\-from \ EXCLUDEFILE +read exclude patterns from EXCLUDEFILE, one per line +.TP +.BI \-\-pattern \ PATTERN +experimental: include/exclude paths matching PATTERN +.TP +.BI \-\-patterns\-from \ PATTERNFILE +experimental: read include/exclude patterns from PATTERNFILE, one per line +.TP +.BI \-\-strip\-components \ NUMBER +Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. +.UNINDENT +.SH SEE ALSO +.sp +\fIborg\-common(1)\fP +.SH AUTHOR +The Borg Collective +.\" Generated by docutils manpage writer. +. diff --git a/docs/man_intro.rst b/docs/man_intro.rst index 85ba9bd323..44dee959e5 100644 --- a/docs/man_intro.rst +++ b/docs/man_intro.rst @@ -32,8 +32,8 @@ fully trusted targets. 
Borg stores a set of files in an *archive*. A *repository* is a collection of *archives*. The format of repositories is Borg-specific. Borg does not -distinguish archives from each other in a any way other than their name, -it does not matter when or where archives where created (eg. different hosts). +distinguish archives from each other in any way other than their name, +it does not matter when or where archives were created (e.g. different hosts). EXAMPLES -------- @@ -61,7 +61,7 @@ SEE ALSO `borg-compression(1)`, `borg-patterns(1)`, `borg-placeholders(1)` -* Main web site https://borgbackup.readthedocs.org/ +* Main web site https://www.borgbackup.org/ * Releases https://github.com/borgbackup/borg/releases * Changelog https://github.com/borgbackup/borg/blob/master/docs/changes.rst * GitHub https://github.com/borgbackup/borg diff --git a/docs/misc/asciinema/advanced.json b/docs/misc/asciinema/advanced.json new file mode 100644 index 0000000000..d9f951e6b5 --- /dev/null +++ b/docs/misc/asciinema/advanced.json @@ -0,0 +1,7733 @@ +{ + "version": 1, + "width": 78, + "height": 24, + "duration": 446.783754, + "command": null, + "title": null, + "env": { + "TERM": "xterm-256color", + "SHELL": "/bin/zsh" + }, + "stdout": [ + [ + 0.29658, + "\b\u001b[1m$ # \u001b[1mFor the pro users, here are some advanced features of borg, so you can imp\u001b[1mr\u001b[1mess your friends. 
;)\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.025674, + "\u001b[?1l\u001b>" + ], + [ + 0.000375, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000796, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000953, + "\u001b]7;\u0007" + ], + [ + 0.000799, + "\u001b]7;\u0007" + ], + [ + 7.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 4.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000368, + "\u001b[?2004h" + ], + [ + 0.857202, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.269836, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.277016, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.185115, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.222294, + "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.098908, + "\b\u001b[1mt\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.471037, + "\b\u001b[1me\u001b[1m:\u001b[0m\u001b[39m" + ], + [ + 0.276132, + "\b\u001b[1m:\u001b[1m This screencast was made with borg version 1.1.0 – older or newer bo\u001b[1mr\u001b[1mg versions may behave differently.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.063392, + "\u001b[?1l\u001b>" + ], + [ + 0.001402, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001228, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.002846, + "\u001b]7;\u0007" + ], + [ + 0.002554, + "\u001b]7;\u0007" + ], + [ + 6.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000229, + "\u001b[?1h\u001b=" + ], + [ + 0.000858, + "\u001b[?2004h" + ], + [ + 0.944947, + "\u001b[?1l\u001b>" + ], + [ + 0.000319, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000652, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001131, + "\u001b]7;\u0007" + ], + [ + 0.000871, + "\u001b]7;\u0007" + ], + [ + 9.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000117, + "\u001b[?1h\u001b=" + ], + [ + 0.00014, + "\u001b[?2004h" + ], + [ + 0.91046, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.350642, + 
"\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.226284, + "\b\b\u001b[1m#\u001b[1m \u001b[1mF\u001b[0m\u001b[39m" + ], + [ + 0.190635, + "\b\u001b[1mF\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.226298, + "\b\u001b[1mi\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.094075, + "\b\u001b[1mr\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.125931, + "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.210409, + "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.333349, + "\b\u001b[1m \u001b[1mof all, we can use several environment variables for borg.\u001b[0m\u001b[39m" + ], + [ + 1.115007, + "\u001b[?1l\u001b>" + ], + [ + 0.000418, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000665, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001185, + "\u001b]7;\u0007" + ], + [ + 0.00091, + "\u001b]7;\u0007" + ], + [ + 2.5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000298, + "\u001b[?2004h" + ], + [ + 1.193161, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.249128, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.253119, + "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" + ], + [ + 0.328187, + "\b\u001b[1mE\u001b[1m.\u001b[0m\u001b[39m" + ], + [ + 0.873845, + "\b\u001b[1m.\u001b[1mg\u001b[0m\u001b[39m" + ], + [ + 0.164238, + "\b\u001b[1mg\u001b[1m.\u001b[0m\u001b[39m" + ], + [ + 0.211331, + "\b\u001b[1m.\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.15971, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.133833, + "\b\u001b[1mw\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 2.95423, + "\b\u001b[1me\u001b[1m do not want to type in our repo path and password again and again…" + ], + [ + 1.769654, + "\u001b[K" + ], + [ + 2.7e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.000616, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000594, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00144, + "\u001b]7;\u0007" + ], + [ + 0.001172, + "\u001b]7;\u0007" + ], + [ + 
3.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000419, + "\u001b[?2004h" + ], + [ + 0.975676, + "\u001b[1m\u001b[31me\u001b[0m\u001b[39m" + ], + [ + 0.156719, + "\b\u001b[0m\u001b[32me\u001b[32mx\u001b[39m" + ], + [ + 0.121911, + "\b\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mx\u001b[1m\u001b[31mp\u001b[0m\u001b[39m" + ], + [ + 0.15502, + "\b\u001b[1m\u001b[31mp\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.26241, + "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.126933, + "\b\b\b\b\b\u001b[0m\u001b[33me\u001b[0m\u001b[33mx\u001b[0m\u001b[33mp\u001b[0m\u001b[33mo\u001b[0m\u001b[33mr\u001b[33mt\u001b[39m" + ], + [ + 0.192182, + " " + ], + [ + 0.304561, + "B" + ], + [ + 0.192073, + "O" + ], + [ + 0.136183, + "R" + ], + [ + 0.114362, + "G" + ], + [ + 0.576349, + "_" + ], + [ + 0.103719, + "R" + ], + [ + 0.113626, + "E" + ], + [ + 0.159395, + "P" + ], + [ + 0.141942, + "O" + ], + [ + 0.554082, + "=" + ], + [ + 0.74644, + "'" + ], + [ + 0.69222, + "/" + ], + [ + 0.20093, + "m" + ], + [ + 0.108068, + "e" + ], + [ + 0.125576, + "d" + ], + [ + 0.161298, + "i" + ], + [ + 0.107949, + "a" + ], + [ + 0.423969, + "/" + ], + [ + 0.623591, + "b" + ], + [ + 0.102775, + "a" + ], + [ + 0.146442, + "c" + ], + [ + 0.116202, + "k" + ], + [ + 0.133034, + "u" + ], + [ + 0.282831, + "p" + ], + [ + 0.436512, + "/" + ], + [ + 0.551147, + "b" + ], + [ + 0.208373, + "o" + ], + [ + 0.108883, + "r" + ], + [ + 0.137272, + "g" + ], + [ + 0.218057, + "d" + ], + [ + 0.122586, + "e" + ], + [ + 0.133605, + "m" + ], + [ + 0.170095, + "o" + ], + [ + 0.795644, + "'" + ], + [ + 0.928899, + "\u001b[?1l\u001b>" + ], + [ + 0.001469, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000802, + "\u001b]2;export BORG_REPO='/media/backup/borgdemo' \u0007\u001b]1;export\u0007" + ], + [ + 0.000109, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001347, + "\u001b]7;\u0007" + ], + [ + 0.001006, + 
"\u001b]7;\u0007" + ], + [ + 5.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000186, + "\u001b[?2004h" + ], + [ + 0.718289, + "\u001b[1m\u001b[31me\u001b[0m\u001b[39m" + ], + [ + 0.19628, + "\b\u001b[0m\u001b[32me\u001b[32mx\u001b[39m" + ], + [ + 0.269637, + "\b\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mx\u001b[1m\u001b[31mp\u001b[0m\u001b[39m" + ], + [ + 0.164388, + "\b\u001b[1m\u001b[31mp\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.332999, + "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.121063, + "\b\b\b\b\b\u001b[0m\u001b[33me\u001b[0m\u001b[33mx\u001b[0m\u001b[33mp\u001b[0m\u001b[33mo\u001b[0m\u001b[33mr\u001b[33mt\u001b[39m" + ], + [ + 0.265335, + " " + ], + [ + 0.311313, + "B" + ], + [ + 0.205307, + "O" + ], + [ + 0.159682, + "R" + ], + [ + 0.141683, + "G" + ], + [ + 0.553563, + "_" + ], + [ + 0.225583, + "P" + ], + [ + 0.10739, + "A" + ], + [ + 0.204722, + "S" + ], + [ + 0.145905, + "S" + ], + [ + 0.312666, + "P" + ], + [ + 0.311469, + "H" + ], + [ + 0.209393, + "R" + ], + [ + 0.069618, + "A" + ], + [ + 0.208505, + "S" + ], + [ + 0.202229, + "E" + ], + [ + 0.719142, + "=" + ], + [ + 0.61979, + "'" + ], + [ + 0.414834, + "1" + ], + [ + 0.208777, + "2" + ], + [ + 0.193519, + "3" + ], + [ + 0.171001, + "4" + ], + [ + 0.542373, + "'" + ], + [ + 0.876006, + "\u001b[?1l\u001b>" + ], + [ + 0.002877, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001161, + "\u001b]2;export BORG_PASSPHRASE='1234' \u0007\u001b]1;export\u0007" + ], + [ + 8.5e-05, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.003438, + "\u001b]7;\u0007" + ], + [ + 0.002065, + "\u001b]7;\u0007" + ], + [ + 0.000146, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000508, + "\u001b[?2004h" + ], + [ + 1.238676, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.273221, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 
0.248131, + "\b\b\u001b[1m#\u001b[1m \u001b[1mP\u001b[0m\u001b[39m" + ], + [ + 0.142137, + "\b\u001b[1mP\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.089312, + "\b\u001b[1mr\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.19919, + "\b\u001b[1mo\u001b[1mb\u001b[0m\u001b[39m" + ], + [ + 0.207691, + "\b\u001b[1mb\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.105529, + "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.075159, + "\b\u001b[1me\u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.625428, + "\b\u001b[1mm\u001b[1m solved, borg will use this automatically… :)\u001b[0m\u001b[39m" + ], + [ + 0.442303, + "\u001b[?1l\u001b>" + ], + [ + 0.0004, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00077, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001065, + "\u001b]7;\u0007" + ], + [ + 0.001105, + "\u001b]7;\u0007" + ], + [ + 2.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000266, + "\u001b[?2004h" + ], + [ + 1.570802, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.218966, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.191279, + "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" + ], + [ + 0.144698, + "\b\u001b[1mW\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.313061, + "\b\u001b[1me\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.245196, + "\b\u001b[1m'\u001b[1mll use this right away…\u001b[0m\u001b[39m" + ], + [ + 0.532339, + "\u001b[?1l\u001b>" + ], + [ + 0.000412, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00062, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001169, + "\u001b]7;\u0007" + ], + [ + 0.00087, + "\u001b]7;\u0007" + ], + [ + 2.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000101, + "\u001b[?1h\u001b=" + ], + [ + 0.000279, + "\u001b[?2004h" + ], + [ + 0.63892, + "\u001b[?1l\u001b>" + ], + [ + 0.000369, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00044, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.002911, + 
"\u001b]7;\u0007" + ], + [ + 0.002442, + "\u001b]7;\u0007" + ], + [ + 0.000162, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00059, + "\u001b[?2004h" + ], + [ + 0.548725, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.113549, + "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.290577, + "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.262532, + "\b\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.41846, + "\b\u001b[1mA\u001b[1mDVANCED CREATION ##\u001b[0m\u001b[39m" + ], + [ + 0.535376, + "\u001b[?1l\u001b>" + ], + [ + 0.001234, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000938, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.002912, + "\u001b]7;\u0007" + ], + [ + 0.001987, + "\u001b]7;\u0007" + ], + [ + 7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000134, + "\u001b[?1h\u001b=" + ], + [ + 0.000671, + "\u001b[?2004h" + ], + [ + 0.759129, + "\u001b[?1l\u001b>" + ], + [ + 0.000397, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000757, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001297, + "\u001b]7;\u0007" + ], + [ + 0.00131, + "\u001b]7;\u0007" + ], + [ + 3.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.0001, + "\u001b[?1h\u001b=" + ], + [ + 0.000135, + "\u001b[?2004h" + ], + [ + 0.425509, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.233111, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.185443, + "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" + ], + [ + 0.151433, + "\b\u001b[1mW\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.157168, + "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.148414, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.200586, + "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.145343, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.414343, + "\b\u001b[1mn\u001b[1m also use some placeholders in our archive 
name…\u001b[0m\u001b[39m" + ], + [ + 1.198174, + "\u001b[?1l\u001b>" + ], + [ + 0.000433, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000647, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001494, + "\u001b]7;\u0007" + ], + [ + 0.001069, + "\u001b]7;\u0007" + ], + [ + 8.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 4.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000395, + "\u001b[?2004h" + ], + [ + 0.832499, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.186742, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.076839, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.15706, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.175773, + " " + ], + [ + 0.265231, + "c" + ], + [ + 0.198791, + "r" + ], + [ + 0.162497, + "e" + ], + [ + 0.08856, + "a" + ], + [ + 0.135865, + "t" + ], + [ + 0.112707, + "e" + ], + [ + 0.634063, + " " + ], + [ + 0.621186, + "-" + ], + [ + 0.112118, + "-" + ], + [ + 0.270276, + "s" + ], + [ + 0.135637, + "t" + ], + [ + 0.130994, + "a" + ], + [ + 0.086801, + "t" + ], + [ + 0.119778, + "s" + ], + [ + 0.24882, + " " + ], + [ + 0.47677, + "-" + ], + [ + 0.112232, + "-" + ], + [ + 0.26855, + "p" + ], + [ + 0.218974, + "r" + ], + [ + 0.14527, + "o" + ], + [ + 0.21975, + "g" + ], + [ + 0.104406, + "r" + ], + [ + 0.168975, + "e" + ], + [ + 0.224875, + "s" + ], + [ + 0.161557, + "s" + ], + [ + 0.556139, + " " + ], + [ + 0.90841, + "-" + ], + [ + 0.117065, + "-" + ], + [ + 0.268496, + "c" + ], + [ + 0.118758, + "o" + ], + [ + 0.13892, + "m" + ], + [ + 0.17322, + "p" + ], + [ + 0.146756, + "r" + ], + [ + 0.196139, + "e" + ], + [ + 0.249655, + "s" + ], + [ + 0.157202, + "s" + ], + [ + 0.236521, + "i" + ], + [ + 0.120624, + "o" + ], + [ + 0.175143, + "n" + ], + [ + 0.321073, + " " + ], + [ + 0.249849, + "l" + ], + [ + 0.281988, + "z" + ], + [ + 0.281179, + "4" + ], + [ + 
1.223567, + " " + ], + [ + 0.604439, + ":" + ], + [ + 0.099497, + ":" + ], + [ + 0.760652, + "{" + ], + [ + 0.504646, + "u" + ], + [ + 0.249702, + "s" + ], + [ + 0.310204, + "e" + ], + [ + 0.156776, + "r" + ], + [ + 0.927624, + "}" + ], + [ + 0.972074, + "-" + ], + [ + 0.979824, + "{" + ], + [ + 0.397346, + "n" + ], + [ + 0.195251, + "o" + ], + [ + 0.203266, + "w" + ], + [ + 0.716944, + "}" + ], + [ + 0.992466, + " " + ], + [ + 0.404348, + "\u001b[4mW\u001b[24m" + ], + [ + 0.098053, + "\b\u001b[4mW\u001b[4ma\u001b[24m \b" + ], + [ + 0.440872, + "\b\u001b[4ma\u001b[4ml\u001b[24m" + ], + [ + 0.130433, + "\b\u001b[4ml\u001b[4ml\u001b[24m" + ], + [ + 0.079918, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.009903, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 ::{user}-{now} \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 1.432747, + "\u001b[?1l\u001b>" + ], + [ + 0.006238, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001309, + "\u001b]2;borg create --stats --progress --compression lz4 ::{user}-{now} Wallpaper\u0007\u001b]1;borg\u0007" + ], + [ + 0.703285, + "0 B O 0 B C 0 B D 0 N Wallpaper \r" + ], + [ + 0.059704, + "Initializing cache transaction: Reading config \r" + ], + [ + 0.000259, + "Initializing cache transaction: Reading chunks \r" + ], + [ + 0.000283, + "Initializing cache transaction: Reading files \r" + ], + [ + 0.00035, + " \r" + ], + [ + 0.302813, + "Compacting segments 0% \r" + ], + [ + 0.000422, + "Compacting segments 50% \r" + ], + [ + 2.6e-05, + " \r" + ], + [ + 0.053481, + "Saving files cache \r" + ], + [ + 0.010102, + "Saving chunks cache \r" + ], + [ + 0.000354, + "Saving cache config \r" + ], + [ + 0.08865, + " \r" + ], + [ + 2.6e-05, + " \r" + ], + [ + 0.000371, + "------------------------------------------------------------------------------\r\n" + ], + [ + 3.4e-05, + "Archive name: rugk-2017-07-16T18:51:34\r\n" + ], + [ + 8e-06, + "Archive fingerprint: 
d054cc411324d4bd848b39d1c9cad909073f9ff1a1a503a676d3e050be140396\r\n" + ], + [ + 0.000101, + "Time (start): Sun, 2017-07-16 18:51:34\r\nTime (end): Sun, 2017-07-16 18:51:35\r\n" + ], + [ + 7.5e-05, + "Duration: 0.18 seconds\r\nNumber of files: 1\r\n" + ], + [ + 8.8e-05, + "Utilization of maximum supported archive size: 0%\r\n" + ], + [ + 7e-05, + "------------------------------------------------------------------------------\r\n Original size Compressed size Deduplicated size\r\n" + ], + [ + 1.6e-05, + "This archive: 3.78 MB 3.80 MB 916 B\r\n" + ], + [ + 5.2e-05, + "All archives: 1.86 GB 1.86 GB 561.88 MB\r\n" + ], + [ + 1.3e-05, + "\r\n" + ], + [ + 2.4e-05, + " Unique chunks Total chunks\r\n" + ], + [ + 2.4e-05, + "Chunk index: 1008 3288\r\n" + ], + [ + 2.4e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 0.049018, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00124, + "\u001b]7;\u0007" + ], + [ + 0.000936, + "\u001b]7;\u0007" + ], + [ + 0.000124, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00019, + "\u001b[?2004h" + ], + [ + 0.814358, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.326066, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.279288, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.200695, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.2241, + "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.221056, + "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.341582, + "\b\u001b[1mi\u001b[1mce the backup name.\u001b[0m\u001b[39m" + ], + [ + 1.40396, + "\u001b[?1l\u001b>" + ], + [ + 0.000442, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000701, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00108, + "\u001b]7;\u0007" + ], + [ + 0.000942, + "\u001b]7;\u0007" + ], + [ + 5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 4.3e-05, 
+ "\u001b[?1h\u001b=" + ], + [ + 0.00028, + "\u001b[?2004h" + ], + [ + 1.540998, + "\u001b[?1l\u001b>" + ], + [ + 0.000288, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000571, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.0013, + "\u001b]7;\u0007" + ], + [ + 0.000852, + "\u001b]7;\u0007" + ], + [ + 0.000106, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000164, + "\u001b[?2004h" + ], + [ + 0.402376, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.27499, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.220032, + "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.127907, + "\b\u001b[1mA\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.092357, + "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" + ], + [ + 0.145572, + "\b\u001b[1md\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.222962, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.178534, + "\b\u001b[1mw\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.196668, + "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.225933, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.175493, + "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.119503, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.425112, + "\b\u001b[1mn\u001b[1m put completely different data, with different backup settings, i\u001b[1mn\u001b[1m our backup. 
It will be deduplicated, anyway:\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.421849, + "\u001b[?1l\u001b>" + ], + [ + 0.000749, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00066, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00197, + "\u001b]7;\u0007" + ], + [ + 0.001476, + "\u001b]7;\u0007" + ], + [ + 5.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000124, + "\u001b[?1h\u001b=" + ], + [ + 0.000525, + "\u001b[?2004h" + ], + [ + 1.444268, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.209812, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.118788, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.145792, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.20446, + " " + ], + [ + 0.309592, + "c" + ], + [ + 0.201447, + "r" + ], + [ + 0.151315, + "e" + ], + [ + 0.084953, + "a" + ], + [ + 0.156918, + "t" + ], + [ + 0.091724, + "e" + ], + [ + 0.324287, + " " + ], + [ + 0.861486, + "-" + ], + [ + 0.134231, + "-" + ], + [ + 0.491182, + "s" + ], + [ + 0.195253, + "t" + ], + [ + 0.097572, + "a" + ], + [ + 0.09545, + "t" + ], + [ + 0.111782, + "s" + ], + [ + 0.301387, + " " + ], + [ + 0.524478, + "-" + ], + [ + 0.112538, + "-" + ], + [ + 0.397406, + "p" + ], + [ + 0.175509, + "r" + ], + [ + 0.203203, + "o" + ], + [ + 0.257392, + "g" + ], + [ + 0.1453, + "r" + ], + [ + 0.174285, + "e" + ], + [ + 0.353531, + "s" + ], + [ + 0.176989, + "s" + ], + [ + 0.386157, + " " + ], + [ + 0.510691, + "-" + ], + [ + 0.115919, + "-" + ], + [ + 0.225102, + "c" + ], + [ + 0.145577, + "o" + ], + [ + 0.133821, + "m" + ], + [ + 0.171364, + "p" + ], + [ + 0.157255, + "r" + ], + [ + 0.162989, + "e" + ], + [ + 0.256274, + "s" + ], + [ + 0.167254, + "s" + ], + [ + 0.253369, + "i" + ], + [ + 0.1197, + "o" + ], + [ + 0.178105, + "n" + ], + [ + 0.824434, + " " + ], + [ + 0.734608, + "z" + ], + [ + 0.237239, + "l" + ], + 
[ + 0.158877, + "i" + ], + [ + 0.148988, + "b" + ], + [ + 0.289236, + "," + ], + [ + 0.349273, + "6" + ], + [ + 0.618231, + " " + ], + [ + 0.449031, + "-" + ], + [ + 0.119307, + "-" + ], + [ + 0.451923, + "e" + ], + [ + 0.330743, + "x" + ], + [ + 0.232655, + "c" + ], + [ + 0.197384, + "l" + ], + [ + 0.176276, + "u" + ], + [ + 0.104427, + "d" + ], + [ + 0.141163, + "e" + ], + [ + 0.359309, + " " + ], + [ + 1.198529, + "\u001b[4m~\u001b[24m" + ], + [ + 0.338729, + "\b\u001b[4m~\u001b[4m/\u001b[24m" + ], + [ + 0.352573, + "\b\u001b[4m/\u001b[4mD\u001b[24m" + ], + [ + 0.190254, + "\b\u001b[4mD\u001b[4mo\u001b[24m" + ], + [ + 0.113631, + "\b\u001b[4mo\u001b[4mw\u001b[24m" + ], + [ + 0.743216, + "\b\u001b[4mw\u001b[4mn\u001b[24m" + ], + [ + 0.613852, + "\b\u001b[4mn\u001b[4ml\u001b[24m" + ], + [ + 0.121501, + "\b\u001b[4ml\u001b[4mo\u001b[24m" + ], + [ + 0.068625, + "\b\u001b[4mo\u001b[4ma\u001b[24m" + ], + [ + 0.183855, + "\b\u001b[4ma\u001b[4md\u001b[24m" + ], + [ + 0.152099, + "\b\u001b[4md\u001b[4ms\u001b[24m" + ], + [ + 0.793349, + "\b\u001b[4ms\u001b[4m/\u001b[24m" + ], + [ + 0.477575, + "\b\u001b[4m/\u001b[4mb\u001b[24m" + ], + [ + 0.198072, + "\b\u001b[4mb\u001b[4mi\u001b[24m \r\u001b[K" + ], + [ + 0.175276, + "\u001b[A\u001b[77C\u001b[4mi\u001b[4mg\u001b[24m" + ], + [ + 0.647369, + "\r\u001b[4mg\u001b[24m " + ], + [ + 0.439418, + ":" + ], + [ + 0.108932, + ":" + ], + [ + 0.556615, + "{" + ], + [ + 0.244626, + "u" + ], + [ + 0.097534, + "s" + ], + [ + 0.187502, + "e" + ], + [ + 0.16023, + "r" + ], + [ + 0.675542, + "}" + ], + [ + 0.988946, + "-" + ], + [ + 0.545789, + "{" + ], + [ + 0.33121, + "n" + ], + [ + 0.204667, + "o" + ], + [ + 0.141818, + "w" + ], + [ + 0.397217, + "}" + ], + [ + 0.979478, + " " + ], + [ + 0.768118, + "\u001b[4m~\u001b[24m" + ], + [ + 0.589532, + "\b\u001b[4m~\u001b[4m/\u001b[24m" + ], + [ + 0.515186, + "\b\u001b[4m/\u001b[4mD\u001b[24m" + ], + [ + 0.17703, + "\b\u001b[4mD\u001b[4mo\u001b[24m" + ], + [ + 0.121294, + 
"\b\u001b[4mo\u001b[4mw\u001b[24m" + ], + [ + 0.153543, + "\b\u001b[4mw\u001b[4mn\u001b[24m" + ], + [ + 0.282343, + "\b\u001b[4mn\u001b[4ml\u001b[24m" + ], + [ + 0.129573, + "\b\u001b[4ml\u001b[4mo\u001b[24m" + ], + [ + 0.095125, + "\b\u001b[4mo\u001b[4ma\u001b[24m" + ], + [ + 0.19963, + "\b\u001b[4ma\u001b[4md\u001b[24m" + ], + [ + 0.142667, + "\b\u001b[4md\u001b[4ms\u001b[24m" + ], + [ + 1.499285, + "\u001b[?1l\u001b>" + ], + [ + 0.003081, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000637, + "\u001b]2;borg create --stats --progress --compression zlib,6 --exclude ~/Downloads/big\u0007\u001b]1;borg\u0007" + ], + [ + 0.687457, + "0 B O 0 B C 0 B D 0 N home/rugk/Downloads \r" + ], + [ + 0.025551, + "Initializing cache transaction: Reading config \r" + ], + [ + 0.000326, + "Initializing cache transaction: Reading chunks \r" + ], + [ + 0.000273, + "Initializing cache transaction: Reading files \r" + ], + [ + 0.000394, + " \r" + ], + [ + 0.220691, + "1.31 MB O 1.29 MB C 1.29 MB D 1 N home/rugk/Downloads...chiveWithStuffHere.zip\r" + ], + [ + 0.26224, + "7.70 MB O 6.91 MB C 6.91 MB D 2 N home/rugk/Downloads...droid.gms-11059462.apk\r" + ], + [ + 0.32599, + "Compacting segments 0% \r" + ], + [ + 0.026073, + "Compacting segments 50% \r" + ], + [ + 0.001982, + " \r" + ], + [ + 0.058565, + "Saving files cache \r" + ], + [ + 0.011363, + "Saving chunks cache \r" + ], + [ + 0.000378, + "Saving cache config \r" + ], + [ + 0.12955, + " \r" + ], + [ + 3.4e-05, + " \r" + ], + [ + 0.00039, + "------------------------------------------------------------------------------\r\n" + ], + [ + 1.6e-05, + "Archive name: rugk-2017-07-16T18:52:19\r\n" + ], + [ + 3.1e-05, + "Archive fingerprint: 0de98f590b004ad7545f2013c4c9f2d4e3eed1415d177c89d6c2b7ff05918d2e\r\n" + ], + [ + 2.2e-05, + "Time (start): Sun, 2017-07-16 18:52:19\r\n" + ], + [ + 7.2e-05, + "Time (end): Sun, 2017-07-16 18:52:20\r\nDuration: 0.63 seconds\r\n" + ], + [ + 3e-05, + "Number of files: 6\r\n" + ], + [ + 2.5e-05, + "Utilization of 
maximum supported archive size: 0%\r\n" + ], + [ + 2.4e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 1.8e-05, + " Original size Compressed size Deduplicated size\r\n" + ], + [ + 2.5e-05, + "This archive: 9.55 MB 8.04 MB 8.04 MB\r\n" + ], + [ + 2.4e-05, + "All archives: 1.87 GB 1.86 GB 569.92 MB\r\n" + ], + [ + 2.5e-05, + "\r\n" + ], + [ + 2.4e-05, + " Unique chunks Total chunks\r\n" + ], + [ + 2.4e-05, + "Chunk index: 1023 3303\r\n" + ], + [ + 2.4e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 0.063104, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001326, + "\u001b]7;\u0007" + ], + [ + 0.001145, + "\u001b]7;\u0007" + ], + [ + 8.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.0002, + "\u001b[?2004h" + ], + [ + 3.131399, + "\u001b[?1l\u001b>" + ], + [ + 0.000281, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00048, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001354, + "\u001b]7;\u0007" + ], + [ + 0.000923, + "\u001b]7;\u0007" + ], + [ + 6.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 5.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000161, + "\u001b[?2004h" + ], + [ + 0.285262, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.419379, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.277555, + "\b\b\u001b[1m#\u001b[1m \u001b[1mO\u001b[0m\u001b[39m" + ], + [ + 0.015676, + "\b\u001b[1mO\u001b[0m\u001b[39m" + ], + [ + 0.119839, + "\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.315418, + "\b\u001b[1mr\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.224426, + "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.10624, + "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.170324, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.995665, + "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.139331, + 
"\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.174188, + "\b\u001b[1ms\u001b[1m backup a device via STDIN.\u001b[0m\u001b[39m" + ], + [ + 1.117059, + "\u001b[?1l\u001b>" + ], + [ + 0.000376, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000566, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001272, + "\u001b]7;\u0007" + ], + [ + 0.000893, + "\u001b]7;\u0007" + ], + [ + 8.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 3.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000291, + "\u001b[?2004h" + ], + [ + 2.390246, + "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" + ], + [ + 0.179283, + "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" + ], + [ + 0.08919, + "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" + ], + [ + 0.156134, + "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" + ], + [ + 0.939511, + " " + ], + [ + 0.219491, + "\u001b[32md\u001b[39m" + ], + [ + 0.128817, + "\b\u001b[32md\u001b[32md\u001b[39m" + ], + [ + 0.317081, + " " + ], + [ + 0.206442, + "i" + ], + [ + 0.127682, + "f" + ], + [ + 0.497718, + "=" + ], + [ + 0.79125, + "/" + ], + [ + 0.162326, + "d" + ], + [ + 0.141147, + "e" + ], + [ + 0.17081, + "v" + ], + [ + 0.229501, + "/" + ], + [ + 0.309668, + "s" + ], + [ + 0.201626, + "d" + ], + [ + 0.121565, + "x" + ], + [ + 1.112764, + " " + ], + [ + 0.458342, + "b" + ], + [ + 0.13412, + "s" + ], + [ + 0.426796, + "=" + ], + [ + 0.325514, + "1" + ], + [ + 0.182735, + "0" + ], + [ + 0.635284, + "M" + ], + [ + 0.571527, + " " + ], + [ + 0.644682, + "|" + ], + [ + 0.668689, + " " + ], + [ + 0.368219, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.197192, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.069454, + "\b\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.15983, + 
"\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.193693, + " " + ], + [ + 0.342177, + "c" + ], + [ + 0.213502, + "r" + ], + [ + 0.165989, + "e" + ], + [ + 0.101269, + "a" + ], + [ + 0.20561, + "t" + ], + [ + 0.172574, + "e" + ], + [ + 0.302751, + " " + ], + [ + 0.524261, + "-" + ], + [ + 0.112867, + "-" + ], + [ + 0.358854, + "p" + ], + [ + 0.158933, + "r" + ], + [ + 0.146881, + "o" + ], + [ + 0.235592, + "g" + ], + [ + 0.153909, + "r" + ], + [ + 0.187519, + "e" + ], + [ + 0.278997, + "s" + ], + [ + 0.161351, + "s" + ], + [ + 0.536239, + " " + ], + [ + 0.472536, + "-" + ], + [ + 0.103445, + "-" + ], + [ + 0.315142, + "s" + ], + [ + 0.188015, + "t" + ], + [ + 0.092463, + "a" + ], + [ + 0.121697, + "t" + ], + [ + 0.108331, + "s" + ], + [ + 0.863705, + " " + ], + [ + 0.547363, + ":" + ], + [ + 0.101957, + ":" + ], + [ + 0.713103, + "s" + ], + [ + 0.172527, + "p" + ], + [ + 0.143374, + "e" + ], + [ + 0.495475, + "c" + ], + [ + 0.184747, + "i" + ], + [ + 0.118626, + "a" + ], + [ + 0.21782, + "l" + ], + [ + 0.61779, + "b" + ], + [ + 0.056813, + "a" + ], + [ + 0.18761, + "c" + ], + [ + 0.116227, + "k" + ], + [ + 0.143399, + "u \r\u001b[K" + ], + [ + 0.31621, + "p" + ], + [ + 0.174943, + "\rp " + ], + [ + 0.964699, + "-" + ], + [ + 1.23368, + "\u001b[?1l\u001b>" + ], + [ + 0.003628, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000824, + "\u001b]2;sudo dd if=/dev/sdx bs=10M | borg create --progress --stats ::specialbackup\u0007\u001b]1;dd\u0007" + ], + [ + 0.023411, + "[sudo] password for rugk: " + ], + [ + 3.286582, + "\r\n" + ], + [ + 0.077852, + "Initializing cache transaction: Reading config \r" + ], + [ + 0.000267, + "Initializing cache transaction: Reading chunks \r" + ], + [ + 0.000293, + "Initializing cache transaction: Reading files \r" + ], + [ + 0.00045, + " \r" + ], + [ + 0.083816, + "8.39 MB O 34.25 kB C 34.25 kB D 0 N stdin \r" + ], + [ + 0.228267, + "41.94 MB O 166.40 kB C 100.50 kB D 0 N stdin \r" + ], + [ + 
0.216716, + "75.50 MB O 298.20 kB C 100.50 kB D 0 N stdin \r" + ], + [ + 0.218476, + "109.05 MB O 430.00 kB C 100.50 kB D 0 N stdin \r" + ], + [ + 0.219164, + "142.61 MB O 562.12 kB C 133.77 kB D 0 N stdin \r" + ], + [ + 0.216368, + "176.16 MB O 693.92 kB C 133.77 kB D 0 N stdin \r" + ], + [ + 0.222311, + "209.72 MB O 825.72 kB C 133.77 kB D 0 N stdin \r" + ], + [ + 0.217156, + "243.27 MB O 957.52 kB C 133.77 kB D 0 N stdin \r" + ], + [ + 0.22399, + "276.82 MB O 1.09 MB C 166.77 kB D 0 N stdin \r" + ], + [ + 0.223827, + "310.38 MB O 1.22 MB C 166.77 kB D 0 N stdin \r" + ], + [ + 0.220959, + "343.93 MB O 1.35 MB C 166.77 kB D 0 N stdin \r" + ], + [ + 0.223439, + "377.49 MB O 1.48 MB C 166.77 kB D 0 N stdin \r" + ], + [ + 0.226226, + "411.04 MB O 1.62 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.239743, + "444.60 MB O 1.75 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.229508, + "478.15 MB O 1.88 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.220491, + "511.71 MB O 2.01 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.2504, + "545.26 MB O 2.14 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.241044, + "578.81 MB O 2.28 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.215372, + "612.37 MB O 2.41 MB C 200.04 kB D 0 N stdin \r" + ], + [ + 0.113508, + "60+0 records in\r\n60+0 records out\r\n" + ], + [ + 3.9e-05, + "629145600 bytes (629 MB, 600 MiB) copied, 4.31277 s, 146 MB/s\r\n" + ], + [ + 0.231874, + "Compacting segments 0% \r" + ], + [ + 0.001188, + "Compacting segments 50% \r" + ], + [ + 3.7e-05, + " \r" + ], + [ + 0.078344, + "Saving chunks cache \r" + ], + [ + 0.000348, + "Saving cache config \r" + ], + [ + 0.087821, + " \r" + ], + [ + 2.8e-05, + " \r" + ], + [ + 0.000346, + "------------------------------------------------------------------------------\r\n" + ], + [ + 2.2e-05, + "Archive name: specialbackup\r\n" + ], + [ + 9.7e-05, + "Archive fingerprint: 68e942cc4a48402e48ba87f4887c24e5b9fe06e881b0ca241c791810a108bec0\r\nTime (start): Sun, 2017-07-16 18:52:58\r\n" + ], + [ + 
0.000133, + "Time (end): Sun, 2017-07-16 18:53:05\r\nDuration: 6.99 seconds\r\n" + ], + [ + 1.3e-05, + "Number of files: 1\r\n" + ], + [ + 2.2e-05, + "Utilization of maximum supported archive size: 0%\r\n" + ], + [ + 7.3e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 1.1e-05, + " Original size Compressed size Deduplicated size\r\n" + ], + [ + 2.7e-05, + "This archive: 629.15 MB 2.47 MB 234.02 kB\r\n" + ], + [ + 3.3e-05, + "All archives: 2.50 GB 1.87 GB 570.15 MB\r\n" + ], + [ + 3.3e-05, + "\r\n" + ], + [ + 3.3e-05, + " Unique chunks Total chunks\r\n" + ], + [ + 2.4e-05, + "Chunk index: 1032 3380\r\n" + ], + [ + 2.4e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 0.047256, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001383, + "\u001b]7;\u0007" + ], + [ + 0.001024, + "\u001b]7;\u0007" + ], + [ + 8.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " + ], + [ + 7e-06, + "\u001b[K" + ], + [ + 7.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00021, + "\u001b[?2004h" + ], + [ + 3.669021, + "\u001b[?1l\u001b>" + ], + [ + 0.000291, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000719, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001178, + "\u001b]7;\u0007" + ], + [ + 0.0009, + "\u001b]7;\u0007" + ], + [ + 9.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00022, + "\u001b[?2004h" + ], + [ + 0.311851, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.290767, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.23476, + "\b\b\u001b[1m#\u001b[1m \u001b[1mL\u001b[0m\u001b[39m" + ], + [ + 0.188456, + "\b\u001b[1mL\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.139916, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.522516, + "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.157443, + "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.460729, 
+ "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.1201, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.324466, + "\b\u001b[1mc\u001b[1montinue with some simple things:\u001b[0m\u001b[39m" + ], + [ + 0.634167, + "\u001b[?1l\u001b>" + ], + [ + 0.000434, + "\u001b[?2004l\r\r\n" + ], + [ + 0.0006, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00124, + "\u001b]7;\u0007" + ], + [ + 0.001113, + "\u001b]7;\u0007" + ], + [ + 0.00012, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000136, + "\u001b[?1h\u001b=" + ], + [ + 0.000274, + "\u001b[?2004h" + ], + [ + 1.724466, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.116327, + "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.26172, + "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.250198, + "\b\u001b[1m \u001b[1mU\u001b[0m\u001b[39m" + ], + [ + 0.746624, + "\b\u001b[1mU\u001b[1mSEFUL COMMANDS ##\u001b[0m\u001b[39m" + ], + [ + 0.5602, + "\u001b[?1l\u001b>" + ], + [ + 0.001411, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001009, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.003137, + "\u001b]7;\u0007" + ], + [ + 0.002454, + "\u001b]7;\u0007" + ], + [ + 0.000167, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000157, + "\u001b[?1h\u001b=" + ], + [ + 0.000746, + "\u001b[?2004h" + ], + [ + 1.207899, + "\u001b[?1l\u001b>" + ], + [ + 0.000322, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000472, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001289, + "\u001b]7;\u0007" + ], + [ + 0.000891, + "\u001b]7;\u0007" + ], + + [ + 9.5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " + ], + [ + 1.8e-05, + "\u001b[K" + ], + [ + 0.000115, + "\u001b[?1h\u001b=" + ], + [ + 0.000246, + "\u001b[?2004h" + ], + [ + 0.734707, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.247085, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.182467, + "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" + 
], + [ + 0.123582, + "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.16343, + "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.183388, + "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.083055, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.187526, + "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.130988, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.142246, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.145489, + "\b\u001b[1m \u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.132155, + "\b\u001b[1ms\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.192915, + "\b\u001b[1mh\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.142644, + "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.149707, + "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.134515, + "\b\u001b[1m \u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.085942, + "\b\u001b[1ms\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.160772, + "\b\u001b[1mo\u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.132016, + "\b\u001b[1mm\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.219601, + "\b\u001b[1me\u001b[1m information about an archive. 
You can even do it without \u001b[1mn\u001b[1meeding to specify the archive name:\u001b[0m\u001b[39m\u001b[K" + ], + [ + 0.644657, + "\u001b[?1l\u001b>" + ], + [ + 0.000392, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000705, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001347, + "\u001b]7;\u0007" + ], + [ + 0.001099, + "\u001b]7;\u0007" + ], + [ + 4.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.0001, + "\u001b[?1h\u001b=" + ], + [ + 0.000372, + "\u001b[?2004h" + ], + [ + 2.264862, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.182056, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.083939, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.152072, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.142791, + " " + ], + [ + 0.224315, + "i" + ], + [ + 0.130651, + "n" + ], + [ + 0.100647, + "f" + ], + [ + 0.155636, + "o" + ], + [ + 0.716063, + " " + ], + [ + 0.736635, + ":" + ], + [ + 0.107352, + ":" + ], + [ + 0.289804, + " " + ], + [ + 0.436564, + "-" + ], + [ + 0.131871, + "-" + ], + [ + 0.824072, + "l" + ], + [ + 0.061945, + "a" + ], + [ + 0.136723, + "s" + ], + [ + 0.143197, + "t" + ], + [ + 0.186833, + " " + ], + [ + 0.125784, + "1" + ], + [ + 0.924568, + "\u001b[?1l\u001b>" + ], + [ + 0.002555, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00096, + "\u001b]2;borg info :: --last 1\u0007\u001b]1;borg\u0007" + ], + [ + 0.693043, + "Archive name: specialbackup\r\nArchive fingerprint: 68e942cc4a48402e48ba87f4887c24e5b9fe06e881b0ca241c791810a108bec0\r\nComment: \r\nHostname: tux\r\nUsername: rugk\r\nTime (start): Sun, 2017-07-16 18:52:58\r\nTime (end): Sun, 2017-07-16 18:53:05\r\nDuration: 6.99 seconds\r\nNumber of files: 1\r\nCommand line: borg create --progress --stats ::specialbackup -\r\nUtilization of maximum supported archive size: 
0%\r\n------------------------------------------------------------------------------\r\n Original size Compressed size Deduplicated size\r\nThis archive: 629.15 MB 2.47 MB 234.02 kB\r\nAll archives: 2.50 GB 1.87 GB 570.15 MB\r\n\r\n Unique chunks Total chunks\r\nChunk index: 1032 3380\r\n" + ], + [ + 0.045207, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001204, + "\u001b]7;\u0007" + ], + [ + 0.000923, + "\u001b]7;\u0007" + ], + [ + 3.5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000129, + "\u001b[?1h\u001b=" + ], + [ + 0.000196, + "\u001b[?2004h" + ], + [ + 1.70302, + "\u001b[?1l\u001b>" + ], + [ + 0.000314, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000475, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001262, + "\u001b]7;\u0007" + ], + [ + 0.00098, + "\u001b]7;\u0007" + ], + [ + 4.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000164, + "\u001b[?2004h" + ], + [ + 0.281651, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.234109, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.181326, + "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" + ], + [ + 0.12398, + "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.166912, + "\b\u001b[1mo\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.490114, + "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.160581, + "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.148283, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.453708, + "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.118956, + "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.125062, + "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.130519, + "\b\u001b[1m \u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.130132, + "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.265033, + "\b\u001b[1me\u001b[1mname our last archive:\u001b[0m\u001b[39m" + ], + [ + 1.001935, + 
"\u001b[?1l\u001b>" + ], + [ + 0.000416, + "\u001b[?2004l\r\r\n" + ], + [ + 0.0006, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00114, + "\u001b]7;\u0007" + ], + [ + 0.000898, + "\u001b]7;\u0007" + ], + [ + 2.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 9.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000286, + "\u001b[?2004h" + ], + [ + 1.253113, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.202007, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.105752, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.134948, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.14764, + " " + ], + [ + 0.157682, + "r" + ], + [ + 0.124491, + "e" + ], + [ + 0.118993, + "n" + ], + [ + 0.140445, + "a" + ], + [ + 0.101365, + "m" + ], + [ + 0.115953, + "e" + ], + [ + 1.107064, + " " + ], + [ + 0.561405, + ":" + ], + [ + 0.103305, + ":" + ], + [ + 0.263633, + "s" + ], + [ + 0.142089, + "p" + ], + [ + 0.134253, + "e" + ], + [ + 0.240688, + "c" + ], + [ + 0.136782, + "i" + ], + [ + 0.128372, + "a" + ], + [ + 0.170065, + "l" + ], + [ + 0.592209, + "b" + ], + [ + 0.348417, + "a" + ], + [ + 0.210896, + "c" + ], + [ + 0.259528, + "k" + ], + [ + 0.171523, + "u" + ], + [ + 0.245786, + "p" + ], + [ + 0.582735, + " " + ], + [ + 0.568884, + "b" + ], + [ + 0.101982, + "a" + ], + [ + 0.162673, + "c" + ], + [ + 0.104218, + "k" + ], + [ + 0.132828, + "u" + ], + [ + 0.245157, + "p" + ], + [ + 0.266242, + "-" + ], + [ + 0.316388, + "b" + ], + [ + 0.43535, + "l" + ], + [ + 0.133908, + "o" + ], + [ + 0.047013, + "c" + ], + [ + 0.622041, + "k" + ], + [ + 0.82215, + "-" + ], + [ + 0.183882, + "d" + ], + [ + 0.189034, + "e" + ], + [ + 0.181902, + "v" + ], + [ + 0.18728, + "i" + ], + [ + 0.052242, + "c" + ], + [ + 0.160462, + "e" + ], + [ + 0.645053, + "\u001b[?1l\u001b>" + ], + [ + 0.001146, + "\u001b[?2004l\r\r\n" + 
], + [ + 0.000741, + "\u001b]2;borg rename ::specialbackup backup-block-device\u0007\u001b]1;borg\u0007" + ], + [ + 1.136038, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001149, + "\u001b]7;\u0007" + ], + [ + 0.000968, + "\u001b]7;\u0007" + ], + [ + 7.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000107, + "\u001b[?1h\u001b=" + ], + [ + 0.000193, + "\u001b[?2004h" + ], + [ + 1.203902, + "\u001b[32mborg\u001b[39m rename ::specialbackup backup-block-device" + ], + [ + 0.192203, + "\u001b[47D\u001b[1m#\u001b[1m \u001b[1mS\u001b[1mo\u001b[1m \u001b[1ml\u001b[1me\u001b[1mt\u001b[1m'\u001b[1ms\u001b[1m \u001b[1mr\u001b[1me\u001b[1mn\u001b[1ma\u001b[1mm\u001b[1me\u001b[1m \u001b[1mo\u001b[1mu\u001b[1mr\u001b[1m \u001b[1ml\u001b[1ma\u001b[1ms\u001b[1mt\u001b[1m \u001b[1ma\u001b[1mr\u001b[1mc\u001b[1mh\u001b[1mi\u001b[1mv\u001b[1me\u001b[1m:\u001b[0m\u001b[39m \u001b[12D" + ], + [ + 0.528657, + "\u001b[35D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mi\u001b[0m\u001b[39mn\u001b[0m\u001b[39mf\u001b[0m\u001b[39mo\u001b[0m\u001b[39m \u001b[0m\u001b[39m:\u001b[0m\u001b[39m:\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ml\u001b[0m\u001b[39ma\u001b[0m\u001b[39ms\u001b[0m\u001b[39mt\u001b[0m\u001b[39m \u001b[0m\u001b[39m1\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[14D" + ], + [ + 0.548884, + "\u001b[?1l\u001b>" + ], + [ + 0.003595, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000857, + "\u001b]2;borg info :: --last 1\u0007\u001b]1;borg\u0007" + ], + [ + 0.689879, + "Archive name: backup-block-device\r\nArchive fingerprint: 
5fd9732b4809252742a7cb3fadf2a971dd6371afd11a07944c0b5803d57c240f\r\nComment: \r\nHostname: tux\r\nUsername: rugk\r\nTime (start): Sun, 2017-07-16 18:52:58\r\nTime (end): Sun, 2017-07-16 18:53:05\r\nDuration: 6.99 seconds\r\nNumber of files: 1\r\nCommand line: borg create --progress --stats ::specialbackup -\r\nUtilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n Original size Compressed size Deduplicated size\r\nThis archive: 629.15 MB 2.47 MB 234.04 kB\r\nAll archives: 2.50 GB 1.87 GB 570.15 MB\r\n\r\n Unique chunks Total chunks\r\nChunk index: 1032 3380\r\n" + ], + [ + 0.044772, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001259, + "\u001b]7;\u0007" + ], + [ + 0.001013, + "\u001b]7;\u0007" + ], + [ + 8.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000109, + "\u001b[?1h\u001b=" + ], + [ + 0.000191, + "\u001b[?2004h" + ], + [ + 2.415375, + "\u001b[?1l\u001b>" + ], + [ + 0.000379, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000632, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001347, + "\u001b]7;\u0007" + ], + [ + 0.001044, + "\u001b]7;\u0007" + ], + [ + 8.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000101, + "\u001b[?1h\u001b=" + ], + [ + 0.000183, + "\u001b[?2004h" + ], + [ + 0.412865, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.250988, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.245192, + "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.706056, + "\b\u001b[1mA\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.273409, + "\b\u001b[1m \u001b[1mv\u001b[0m\u001b[39m" + ], + [ + 0.194462, + "\b\u001b[1mv\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.114445, + "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.097756, + "\b\u001b[1mr\u001b[1my\u001b[0m\u001b[39m" + ], + [ + 0.149155, + "\b\u001b[1my\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.258303, + 
"\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.133528, + "\b\u001b[1mi\u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.225062, + "\b\u001b[1mm\u001b[1mp\u001b[0m\u001b[39m" + ], + [ + 0.352638, + "\b\u001b[1mp\u001b[1mortant step if you choose keyfile mode (where the keyfile is onl\u001b[1my\u001b[1m saved locally) is to export your keyfile and possibly print it, etc.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.170303, + "\u001b[?1l\u001b>" + ], + [ + 0.000524, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000714, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001487, + "\u001b]7;\u0007" + ], + [ + 0.001303, + "\u001b]7;\u0007" + ], + [ + 3.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000291, + "\u001b[?2004h" + ], + [ + 2.080689, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.197142, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.172626, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.145083, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.943024, + " " + ], + [ + 0.511742, + "k" + ], + [ + 0.274338, + "e" + ], + [ + 0.308416, + "y" + ], + [ + 0.568141, + " " + ], + [ + 0.62626, + "e" + ], + [ + 0.224255, + "x" + ], + [ + 2.028973, + "p" + ], + [ + 0.220629, + "o" + ], + [ + 0.395617, + "r" + ], + [ + 0.127004, + "t" + ], + [ + 0.635262, + " " + ], + [ + 0.728631, + ":" + ], + [ + 0.116567, + ":" + ], + [ + 0.347323, + " " + ], + [ + 1.713208, + "-" + ], + [ + 0.134471, + "-" + ], + [ + 0.298094, + "q" + ], + [ + 0.316108, + "r" + ], + [ + 0.373821, + "-" + ], + [ + 0.416623, + "c" + ], + [ + 0.400783, + "o" + ], + [ + 0.107762, + "d" + ], + [ + 0.134276, + "e" + ], + [ + 0.384438, + " " + ], + [ + 0.447909, + "f" + ], + [ + 0.162017, + "i" + ], + [ + 0.113187, + "l" + ], + [ + 0.069321, + "e" + ], + [ + 0.627894, + "." 
+ ], + [ + 0.32877, + "h" + ], + [ + 0.137354, + "t" + ], + [ + 0.181468, + "m" + ], + [ + 0.156847, + "l" + ], + [ + 0.434616, + " " + ], + [ + 0.906636, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.546016, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 1.755972, + "\b\u001b[1m \u001b[1mthis creates a nice HTML, but when \u001b[1my\u001b[1mou want something simpler…\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.940038, + "\b\b\u001b[1mr\u001b[0m\u001b[39m\u001b[K" + ], + [ + 0.691374, + "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.501031, + "\b\b\u001b[1ml\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.0295, + "\b\b\u001b[1mp\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029695, + "\b\b\u001b[1mm\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029437, + "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03032, + "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029433, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030373, + "\b\b\u001b[1mg\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029337, + "\b\b\u001b[1mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.031058, + "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029329, + "\b\b\u001b[1mh\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.031142, + "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029181, + "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029786, + "\b\b\u001b[1mm\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030603, + "\b\b\u001b[1mo\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029332, + "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030813, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029428, + "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029368, + 
"\b\b\u001b[1mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030166, + "\b\b\u001b[1ma\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030524, + "\b\b\u001b[1mw\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029333, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030607, + "\b\b\u001b[1mu\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029346, + "\r\u001b[1my\u001b[1mo\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.031102, + "\r\u001b[1my\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029544, + "\u001b[A\u001b[76C\u001b[1m \u001b[0m\u001b[39m \u001b[K\r" + ], + [ + 0.029675, + "\u001b[A\u001b[76C\u001b[1mn\u001b[0m\u001b[39m\u001b[K\u001b[1B\r\u001b[K\u001b[A\u001b[77C" + ], + [ + 0.030809, + "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.02987, + "\b\b\u001b[1mh\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029707, + "\b\b\u001b[1mw\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029901, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03057, + "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029469, + "\b\b\u001b[1mu\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030219, + "\b\b\u001b[1mb\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029227, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030465, + "\b\b\u001b[1m,\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029423, + "\b\b\u001b[1mL\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030292, + "\b\b\u001b[1mM\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030715, + "\b\b\u001b[1mT\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029641, + "\b\b\u001b[1mH\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029367, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.031235, + "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030119, + 
"\b\b\u001b[1mc\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030061, + "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030102, + "\b\b\u001b[1mn\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029384, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029499, + "\b\b\u001b[1ma\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03047, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03019, + "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029337, + "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030138, + "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030049, + "\b\b\u001b[1ma\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030132, + "\b\b\u001b[1me\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029948, + "\b\b\u001b[1mr\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029428, + "\b\b\u001b[1mc\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030197, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.030196, + "\b\b\u001b[1ms\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03118, + "\b\b\u001b[1mi\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.028165, + "\b\b\u001b[1mh\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03128, + "\b\b\u001b[1mt\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.029716, + "\b\b\u001b[1m \u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.03012, + "\b\b\u001b[1m#\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.346808, + "\b\u001b[0m\u001b[39m \b" + ], + [ + 0.19843, + "\b" + ], + [ + 0.307235, + "\b \b" + ], + [ + 0.499683, + "\b \b" + ], + [ + 0.028468, + "\b \b" + ], + [ + 0.029472, + "\b \b" + ], + [ + 0.030565, + "\b \b" + ], + [ + 0.029224, + "\b \b" + ], + [ + 0.030493, + "\b \b" + ], + [ + 0.030666, + "\b \b" + ], + [ + 0.029185, + "\b \b" + ], + [ + 0.02989, + "\b" + ], + [ + 0.029921, + 
"\b \b" + ], + [ + 0.029657, + "\b \b" + ], + [ + 0.154399, + "\b \b" + ], + [ + 0.165915, + "\b \b" + ], + [ + 0.154316, + "\b \b" + ], + [ + 0.154588, + "\b \b" + ], + [ + 0.147868, + "\b \b" + ], + [ + 1.555865, + "p" + ], + [ + 0.446126, + "a" + ], + [ + 0.188714, + "p" + ], + [ + 0.252833, + "e" + ], + [ + 0.142044, + "r" + ], + [ + 0.395895, + " " + ], + [ + 0.423453, + "\u001b[1m# this is a \"manual input\"-only backup (but it is\u001b[1m \u001b[1malso included in the --qr-code option)\u001b[0m\u001b[39m\u001b[K" + ], + [ + 3.71528, + "\u001b[?1l\u001b>" + ], + [ + 0.001413, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000757, + "\u001b]2;borg key export :: --paper\u0007\u001b]1;borg\u0007" + ], + [ + 0.550352, + "To restore key use borg key import --paper /path/to/repo\r\n\r\nBORG PAPER KEY v1\r\nid: 20 / 54f957 2d6d72 de8280 / 158a57 45bdc3 - f6\r\n 1: 86a961 6c676f 726974 686da6 736861 323536 - 14\r\n 2: a46461 7461da 00def1 7c9f3c 81ebc6 730a05 - 35\r\n 3: 12453e d02760 ffdeef 4d0daa 231d81 ae10d8 - e5\r\n 4: 7bb0a1 97c30f 312b61 7170ba d1ea91 da2c88 - 30\r\n 5: ca997e 177b74 38f906 709a66 fbf013 40ab3d - c4\r\n 6: 6af94b 8a36a9 e07b9d b0e08d 3935cd f1bbb9 - 5c\r\n 7: 2b10b6 ebb586 4c0967 f682b9 c64358 fbb63c - a4\r\n 8: b9fc94 240d08 072524 98b619 7bd1c5 21094e - ec\r\n 9: ac4f05 d65a6a 7f8a0d 8cc14e 405b36 c248e1 - 79\r\n10: d23b89 c61074 3e68c9 79c683 2384e8 cd9f82 - 50\r\n11: fc76a9 3f2a9e 05d5f1 313f95 ec4313 53e0c1 - 4a\r\n12: 654f1d ab2b51 2ccbe8 80be07 b6132f 86aeb5 - 11\r\n13: 7e6e48 5ff0d4 41e659 a421f0 5123df f88dff - c9\r\n14: 03db58 bbb410 87d7fc 075b14 5108a4 686173 - 9a\r\n15: 68da00 20524b 8769e9 e5bd18 a9b431 c05b49 - ba\r\n16: 505280 9b104a b081c0 f4efd1 1d3771 34c701 - 40\r\n17: aa6974 657261 74696f 6e73ce 000186 a0a473 - 15\r\n18: 616c7" + ], + [ + 7.2e-05, + "4 da0020 0be74e e1e9af 7b1364 3ee362 - 32\r\n19: 643069 b57a75 d30eb6 104c28 367e17 7dd4d9 - 79\r\n20: f556a7 766572 73696f 6e01 - 32\r\n\r\n" + ], + [ + 0.048873, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001193, + "\u001b]7;\u0007" + ], + [ + 0.000921, + "\u001b]7;\u0007" + ], + [ + 9.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000185, + "\u001b[?2004h" + ], + [ + 3.146565, + "\u001b[?1l\u001b>" + ], + [ + 0.000424, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000795, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001307, + "\u001b]7;\u0007" + ], + [ + 0.001444, + "\u001b]7;\u0007" + ], + [ + 8.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.00011, + "\u001b[?1h\u001b=" + ], + [ + 0.000263, + "\u001b[?2004h" + ], + [ + 0.441809, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.136081, + "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.375389, + "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.284554, + "\b\u001b[1m \u001b[1mM\u001b[0m\u001b[39m" + ], + [ + 0.395833, + "\b\u001b[1mM\u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.434316, + "\b\u001b[1mA\u001b[1mINTENANCE ##\u001b[0m\u001b[39m" + ], + [ + 1.471226, + "\u001b[?1l\u001b>" + ], + [ + 0.00055, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000605, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001464, + "\u001b]7;\u0007" + ], + [ + 0.00092, + "\u001b]7;\u0007" + ], + [ + 9.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000104, + "\u001b[?1h\u001b=" + ], + [ + 0.000309, + "\u001b[?2004h" + ], + [ + 0.977805, + "\u001b[?1l\u001b>" + ], + [ + 0.000452, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000828, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001436, + "\u001b]7;\u0007" + ], + [ + 0.001464, + "\u001b]7;\u0007" + ], + [ + 3.8e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000171, + "\u001b[?1h\u001b=" + ], + [ + 0.000247, + "\u001b[?2004h" + ], + [ + 0.221358, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.374414, + 
"\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.189751, + "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" + ], + [ + 0.087275, + "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.140008, + "\b\u001b[1mo\u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.150891, + "\b\u001b[1mm\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.387855, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.204067, + "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.127209, + "\b\u001b[1mi\u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.073999, + "\b\u001b[1mm\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.130356, + "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.224406, + "\b\u001b[1ms\u001b[1m backups get broken or we want a regular \"checkup\" that everythin\u001b[1mg\u001b[1m is okay…\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.361948, + "\u001b[?1l\u001b>" + ], + [ + 0.000402, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000743, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001212, + "\u001b]7;\u0007" + ], + [ + 0.000923, + "\u001b]7;\u0007" + ], + [ + 1.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000321, + "\u001b[?2004h" + ], + [ + 2.246766, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.18622, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.121068, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.146401, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.255479, + " " + ], + [ + 0.268833, + "c" + ], + [ + 0.154418, + "h" + ], + [ + 0.106649, + "e" + ], + [ + 0.142762, + "c" + ], + [ + 0.306359, + "k" + ], + [ + 0.697455, + " " + ], + [ + 1.113236, + "-" + ], + [ + 0.768765, + "v" + ], + [ + 0.477353, + " " + ], + [ + 0.387303, + ":" + ], + [ + 0.102251, + ":" + ], + [ + 0.749971, + "\u001b[?1l\u001b>" + ], + [ + 0.001961, + 
"\u001b[?2004l\r\r\n" + ], + [ + 0.000798, + "\u001b]2;borg check -v ::\u0007\u001b]1;borg\u0007" + ], + [ + 0.54272, + "Starting repository check\r\n" + ], + [ + 1.152819, + "Starting repository index check\r\n" + ], + [ + 0.00038, + "Completed repository check, no problems found.\r\n" + ], + [ + 0.000129, + "Starting archive consistency check...\r\n" + ], + [ + 0.095799, + "Analyzing archive backup1 (1/6)\r\n" + ], + [ + 0.109358, + "Analyzing archive backup2 (2/6)\r\n" + ], + [ + 0.036555, + "Analyzing archive backup3 (3/6)\r\n" + ], + [ + 0.03649, + "Analyzing archive rugk-2017-07-16T18:51:34 (4/6)\r\n" + ], + [ + 0.000491, + "Analyzing archive rugk-2017-07-16T18:52:19 (5/6)\r\n" + ], + [ + 0.000729, + "Analyzing archive backup-block-device (6/6)\r\n" + ], + [ + 0.00119, + "Archive consistency check complete, no problems found.\r\n" + ], + [ + 0.081895, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001153, + "\u001b]7;\u0007" + ], + [ + 0.000924, + "\u001b]7;\u0007" + ], + [ + 0.000108, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00022, + "\u001b[?2004h" + ], + [ + 2.243609, + "\u001b[?1l\u001b>" + ], + [ + 0.000511, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000535, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001789, + "\u001b]7;\u0007" + ], + [ + 0.00157, + "\u001b]7;\u0007" + ], + [ + 0.000139, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.7e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00033, + "\u001b[?2004h" + ], + [ + 0.326751, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.24289, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.285802, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.191158, + "\b\u001b[1mN\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.184029, + "\b\u001b[1me\u001b[1mx\u001b[0m\u001b[39m" + ], + [ + 0.16373, + "\b\u001b[1mx\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.239936, + 
"\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.27885, + "\b\u001b[1m \u001b[1mp\u001b[0m\u001b[39m" + ], + [ + 0.12665, + "\b\u001b[1mp\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.154792, + "\b\u001b[1mr\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.372203, + "\b\u001b[1mo\u001b[1mblem: Usually you do not have infinite disk space. So you may need\u001b[1m \u001b[1mto prune your archive…\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.956234, + "\u001b[?1l\u001b>" + ], + [ + 0.000446, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000607, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001281, + "\u001b]7;\u0007" + ], + [ + 0.000983, + "\u001b]7;\u0007" + ], + [ + 2.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000142, + "\u001b[?1h\u001b=" + ], + [ + 0.00032, + "\u001b[?2004h" + ], + [ + 1.137641, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.26675, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.151609, + "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" + ], + [ + 0.11765, + "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.158458, + "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.149615, + "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.080657, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.144379, + "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.104266, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.132218, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.202965, + "\b\u001b[1m \u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.17807, + "\b\u001b[1mt\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.123814, + "\b\u001b[1mu\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.325016, + "\b\u001b[1mn\u001b[1me this in every detail. See the docs for details. 
Here only a s\u001b[1mi\u001b[1mmple example:\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.91505, + "\u001b[?1l\u001b>" + ], + [ + 0.000406, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000684, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001347, + "\u001b]7;\u0007" + ], + [ + 0.001084, + "\u001b]7;\u0007" + ], + [ + 0.000116, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000118, + "\u001b[?1h\u001b=" + ], + [ + 0.000246, + "\u001b[?2004h" + ], + [ + 2.556304, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.198214, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.125589, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.147156, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.202848, + " " + ], + [ + 0.369539, + "p" + ], + [ + 0.228714, + "r" + ], + [ + 0.184236, + "u" + ], + [ + 0.154014, + "n" + ], + [ + 0.136362, + "e" + ], + [ + 0.94169, + " " + ], + [ + 0.44829, + "-" + ], + [ + 0.112062, + "-" + ], + [ + 0.37454, + "l" + ], + [ + 0.157195, + "i" + ], + [ + 0.116633, + "s" + ], + [ + 0.193515, + "t" + ], + [ + 0.486369, + " " + ], + [ + 0.442107, + "-" + ], + [ + 0.12257, + "-" + ], + [ + 0.403774, + "k" + ], + [ + 0.214488, + "e" + ], + [ + 0.771743, + "e" + ], + [ + 0.349591, + "p" + ], + [ + 0.352253, + "-" + ], + [ + 0.201267, + "l" + ], + [ + 0.109728, + "a" + ], + [ + 0.146296, + "s" + ], + [ + 0.130476, + "t" + ], + [ + 0.234998, + " " + ], + [ + 0.264266, + "1" + ], + [ + 0.429572, + " " + ], + [ + 0.505667, + "-" + ], + [ + 0.105697, + "-" + ], + [ + 0.294354, + "d" + ], + [ + 0.178175, + "r" + ], + [ + 0.239011, + "y" + ], + [ + 0.561933, + "-" + ], + [ + 0.220564, + "r" + ], + [ + 0.172983, + "u" + ], + [ + 0.138969, + "n" + ], + [ + 0.891028, + "\u001b[?1l\u001b>" + ], + [ + 0.004152, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000975, + "\u001b]2;borg prune --list --keep-last 
1 --dry-run\u0007\u001b]1;borg\u0007" + ], + [ + 0.658906, + "Keeping archive: backup-block-device Sun, 2017-07-16 18:52:58 [5fd9732b4809252742a7cb3fadf2a971dd6371afd11a07944c0b5803d57c240f]\r\n" + ], + [ + 0.000155, + "Would prune: rugk-2017-07-16T18:52:19 Sun, 2017-07-16 18:52:19 [0de98f590b004ad7545f2013c4c9f2d4e3eed1415d177c89d6c2b7ff05918d2e]\r\n" + ], + [ + 0.000118, + "Would prune: rugk-2017-07-16T18:51:34 Sun, 2017-07-16 18:51:34 [d054cc411324d4bd848b39d1c9cad909073f9ff1a1a503a676d3e050be140396]\r\n" + ], + [ + 6.5e-05, + "Would prune: backup3 Fri, 2017-07-14 21:55:37 [36cd8fdf9b8b2e3bbb3fc2bb600acd48609efaf3a0880f900e0701a47ff69d4d]\r\n" + ], + [ + 7.1e-05, + "Would prune: backup2 Fri, 2017-07-14 21:54:56 [5aaf03d1c710cf774f9c9ff1c6317b621c14e519c6bac459f6d64b31e3bbd200]\r\n" + ], + [ + 7.1e-05, + "Would prune: backup1 Fri, 2017-07-14 21:54:06 [9758c7db339a066360bffad17b2ffac4fb368c6722c0be3a47a7a9b631f06407]\r\n" + ], + [ + 0.047362, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001109, + "\u001b]7;\u0007" + ], + [ + 0.00093, + "\u001b]7;\u0007" + ], + [ + 7.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000163, + "\u001b[?2004h" + ], + [ + 2.173126, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.420696, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.658252, + "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" + ], + [ + 0.186236, + "\b\u001b[1mW\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.09843, + "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.143515, + "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.153626, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.136407, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.170555, + "\b\u001b[1ma\u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.157309, + "\b\u001b[1mc\u001b[1mtually executing it in a script, you have to use it without the --dry\u001b[1m-\u001b[1mrun option, 
of course.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.08243, + "\u001b[?1l\u001b>" + ], + [ + 0.000512, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000552, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001375, + "\u001b]7;\u0007" + ], + [ + 0.000922, + "\u001b]7;\u0007" + ], + [ + 3.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00026, + "\u001b[?2004h" + ], + [ + 1.169356, + "\u001b[?1l\u001b>" + ], + [ + 0.000602, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000917, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001594, + "\u001b]7;\u0007" + ], + [ + 0.001826, + "\u001b]7;\u0007" + ], + [ + 7.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000204, + "\u001b[?1h\u001b=" + ], + [ + 0.000349, + "\u001b[?2004h" + ], + [ + 0.464206, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.135956, + "\b\u001b[1m#\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.484249, + "\b\b\u001b[1m#\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.299809, + "\b\u001b[1m \u001b[1mR\u001b[0m\u001b[39m" + ], + [ + 0.199072, + "\b\u001b[1mR\u001b[1mE\u001b[0m\u001b[39m" + ], + [ + 0.620669, + "\b\u001b[1mE\u001b[1mSTORE ##\u001b[0m\u001b[39m" + ], + [ + 0.924028, + "\u001b[?1l\u001b>" + ], + [ + 0.000399, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000744, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001142, + "\u001b]7;\u0007" + ], + [ + 0.000834, + "\u001b]7;\u0007" + ], + [ + 9.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000124, + "\u001b[?1h\u001b=" + ], + [ + 0.000294, + "\u001b[?2004h" + ], + [ + 0.797042, + "\u001b[?1l\u001b>" + ], + [ + 0.000325, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001543, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.002662, + "\u001b]7;\u0007" + ], + [ + 0.001568, + "\u001b]7;\u0007" + ], + [ + 4.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 
9.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000185, + "\u001b[?2004h" + ], + [ + 0.705049, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.50212, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 1.210452, + "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" + ], + [ + 0.1987, + "\b\u001b[1mW\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.12116, + "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.152173, + "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.16582, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.378037, + "\b\u001b[1m \u001b[1my\u001b[0m\u001b[39m" + ], + [ + 0.330829, + "\b\u001b[1my\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.180945, + "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.152701, + "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.121298, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.148067, + "\b\u001b[1mw\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.233865, + "\b\u001b[1ma\u001b[1mnt to see the diff between two archives use this command.\u001b[0m\u001b[39m" + ], + [ + 1.947763, + "\u001b[?1l\u001b>" + ], + [ + 0.000408, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000607, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001033, + "\u001b]7;\u0007" + ], + [ + 0.000979, + "\u001b]7;\u0007" + ], + [ + 0.000127, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000278, + "\u001b[?2004h" + ], + [ + 0.693036, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.275798, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.281158, + "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" + ], + [ + 0.386709, + "\b\u001b[1mE\u001b[1m.\u001b[0m\u001b[39m" + ], + [ + 0.136187, + "\b\u001b[1m.\u001b[1mg\u001b[0m\u001b[39m" + ], + [ + 0.262011, + "\b\u001b[1mg\u001b[1m.\u001b[0m\u001b[39m" + ], + [ + 0.234889, + "\b\u001b[1m.\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.361971, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" 
+ ], + [ + 0.162798, + "\b\u001b[1mw\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.077265, + "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.148774, + "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.34541, + "\b\u001b[1mt\u001b[1m happened between the first two backups?\u001b[0m\u001b[39m" + ], + [ + 1.295996, + "\u001b[?1l\u001b>" + ], + [ + 0.000733, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001102, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001634, + "\u001b]7;\u0007" + ], + [ + 0.000634, + "\u001b]7;\u0007" + ], + [ + 5.6e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000303, + "\u001b[?2004h" + ], + [ + 0.441685, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.182795, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.072867, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.161104, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.179655, + " " + ], + [ + 0.154676, + "d" + ], + [ + 0.132421, + "i" + ], + [ + 0.124239, + "f" + ], + [ + 0.13999, + "f" + ], + [ + 0.624444, + " " + ], + [ + 0.862302, + ":" + ], + [ + 0.1169, + ":" + ], + [ + 0.274626, + "b" + ], + [ + 0.100778, + "a" + ], + [ + 0.188526, + "c" + ], + [ + 0.097402, + "k" + ], + [ + 0.144999, + "u" + ], + [ + 0.22317, + "p" + ], + [ + 0.167969, + "1" + ], + [ + 0.44642, + " " + ], + [ + 0.240129, + "b" + ], + [ + 0.164579, + "a" + ], + [ + 0.190471, + "c" + ], + [ + 0.136211, + "k" + ], + [ + 0.12257, + "u" + ], + [ + 0.258587, + "p" + ], + [ + 0.215453, + "2" + ], + [ + 1.160869, + "\u001b[?1l\u001b>" + ], + [ + 0.001983, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000801, + "\u001b]2;borg diff ::backup1 backup2\u0007\u001b]1;borg\u0007" + ], + [ + 0.717522, + "added 20 B Wallpaper/newfile.txt\r\n" + ], + [ + 0.044186, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001157, + "\u001b]7;\u0007" + ], + [ + 0.000949, + "\u001b]7;\u0007" + ], + [ + 0.000108, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 9.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000147, + "\u001b[?2004h" + ], + [ + 1.545435, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.26435, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.178864, + "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.161899, + "\b\u001b[1mA\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.240289, + "\b\u001b[1mh\u001b[1m,\u001b[0m\u001b[39m" + ], + [ + 0.132971, + "\b\u001b[1m,\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.115812, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.111227, + "\b\u001b[1mw\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.159647, + "\b\u001b[1me\u001b[1m added a file, right…\u001b[0m\u001b[39m" + ], + [ + 0.97686, + "\u001b[?1l\u001b>" + ], + [ + 0.000441, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00091, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001031, + "\u001b]7;\u0007" + ], + [ + 0.000995, + "\u001b]7;\u0007" + ], + [ + 2.5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000141, + "\u001b[?1h\u001b=" + ], + [ + 0.000303, + "\u001b[?2004h" + ], + [ + 6.370198, + "\u001b[?1l\u001b>" + ], + [ + 0.000854, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000815, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.003101, + "\u001b]7;\u0007" + ], + [ + 0.002831, + "\u001b]7;\u0007" + ], + [ + 0.000107, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000314, + "\u001b[?1h\u001b=" + ], + [ + 0.000499, + "\u001b[?2004h" + ], + [ + 0.580198, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.240323, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.29592, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.135389, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + 
[ + 0.115437, + "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.157526, + "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.624235, + "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.282742, + "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.133006, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.206434, + "\b\u001b[1ma\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.13301, + "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.255991, + "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.196416, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.275594, + "\b\u001b[1ma\u001b[1mlso other ways to extract the data.\u001b[0m\u001b[39m" + ], + [ + 0.932018, + "\u001b[?1l\u001b>" + ], + [ + 0.001354, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001071, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00297, + "\u001b]7;\u0007" + ], + [ + 0.002675, + "\u001b]7;\u0007" + ], + [ + 0.000154, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000231, + "\u001b[?1h\u001b=" + ], + [ + 0.000895, + "\u001b[?2004h" + ], + [ + 1.021752, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.238058, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.245484, + "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" + ], + [ + 0.719467, + "\b\u001b[1mE\u001b[1m.\u001b[0m\u001b[39m" + ], + [ + 0.151468, + "\b\u001b[1m.\u001b[1mg\u001b[0m\u001b[39m" + ], + [ + 0.183213, + "\b\u001b[1mg\u001b[1m.\u001b[0m\u001b[39m" + ], + [ + 0.599958, + "\b\u001b[1m.\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.316279, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.166858, + "\b\u001b[1ma\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.551272, + "\b\u001b[1ms\u001b[1m a tar archive.\u001b[0m\u001b[39m" + ], + [ + 0.938861, + "\u001b[?1l\u001b>" + ], + [ + 0.000638, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000793, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001159, + "\u001b]7;\u0007" + ], + 
[ + 0.000867, + "\u001b]7;\u0007" + ], + [ + 9.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000282, + "\u001b[?2004h" + ], + [ + 0.860998, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.189263, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.11245, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.133531, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.62438, + " " + ], + [ + 0.295845, + "e" + ], + [ + 0.165874, + "x" + ], + [ + 0.180501, + "p" + ], + [ + 0.166254, + "o" + ], + [ + 0.27793, + "r" + ], + [ + 0.113477, + "t" + ], + [ + 0.46559, + "-" + ], + [ + 0.34577, + "t" + ], + [ + 0.148398, + "a" + ], + [ + 0.17144, + "r" + ], + [ + 0.920527, + " " + ], + [ + 0.40208, + "-" + ], + [ + 0.108683, + "-" + ], + [ + 0.326944, + "p" + ], + [ + 0.195982, + "r" + ], + [ + 0.175632, + "o" + ], + [ + 0.229442, + "g" + ], + [ + 0.133505, + "r" + ], + [ + 0.171995, + "e" + ], + [ + 0.244119, + "s" + ], + [ + 0.154514, + "s" + ], + [ + 0.579295, + " " + ], + [ + 0.575201, + ":" + ], + [ + 0.112098, + ":" + ], + [ + 0.355392, + "b" + ], + [ + 0.110008, + "a" + ], + [ + 0.172393, + "c" + ], + [ + 0.080739, + "k" + ], + [ + 0.134163, + "u" + ], + [ + 0.221434, + "p" + ], + [ + 0.276712, + "2" + ], + [ + 0.6747, + " " + ], + [ + 0.372614, + "b" + ], + [ + 0.09319, + "a" + ], + [ + 0.152876, + "c" + ], + [ + 0.089531, + "k" + ], + [ + 0.150747, + "u" + ], + [ + 0.233879, + "p" + ], + [ + 0.273301, + "." + ], + [ + 0.354416, + "t" + ], + [ + 0.107034, + "a" + ], + [ + 0.144993, + "r" + ], + [ + 0.463039, + "." 
+ ], + [ + 0.352906, + "g" + ], + [ + 0.133262, + "z" + ], + [ + 1.083854, + "\u001b[?1l\u001b>" + ], + [ + 0.004197, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001109, + "\u001b]2;borg export-tar --progress ::backup2 backup.tar.gz\u0007" + ], + [ + 4.7e-05, + "\u001b]1;borg\u0007" + ], + [ + 0.679042, + "Calculating size \r" + ], + [ + 0.036244, + " 0.0% Processing: Wallpaper/bigcollection/Macaws_USBingImage.jpg \r" + ], + [ + 0.020857, + " 0.1% Processing: Wallpaper/bigcollection/_A...r_the_town_of_Herrischried.jpg\r" + ], + [ + 0.030544, + " 0.2% Processing: Wallpaper/bigcollection/_A...her_Atlas__Marokko____Doug.jpg\r" + ], + [ + 0.030864, + " 0.3% Processing: Wallpaper/bigcollection/_A...ssar__S_d-Sulawesi__Indone.jpg\r" + ], + [ + 0.030144, + " 0.4% Processing: Wallpaper/bigcollection/_A..._N_he_von_Timimoun__Algeri.jpg\r" + ], + [ + 0.027643, + " 0.5% Processing: Wallpaper/bigcollection/_A...t_kleinen__aus_Servietten_.jpg\r" + ], + [ + 0.03121, + " 0.6% Processing: Wallpaper/bigcollection/_A...g_Norderoog__small_island_.jpg\r" + ], + [ + 0.031343, + " 0.7% Processing: Wallpaper/bigcollection/_A...im_Snowdonia-Nationalpark_.jpg\r" + ], + [ + 0.031862, + " 0.8% Processing: Wallpaper/bigcollection/_A...l_an_einem_Wasserloch_im_Q.jpg\r" + ], + [ + 0.034847, + " 0.9% Processing: Wallpaper/bigcollection/_A...nten____James_HagerOffset_.jpg\r" + ], + [ + 0.033989, + " 1.0% Processing: Wallpaper/bigcollection/_A...nen__Masai_Mara_National_R.jpg\r" + ], + [ + 0.027388, + " 1.1% Processing: Wallpaper/bigcollection/_A...ard_im_Londolozi-Wildreser.jpg\r" + ], + [ + 0.026632, + " 1.2% Processing: Wallpaper/bigcollection/_A...orning_fog__Aidling__Pfaff.jpg\r" + ], + [ + 0.030864, + " 1.3% Processing: Wallpaper/bigcollection/_A...hutzgebiet_Lewa_Wildlife_C.jpg\r" + ], + [ + 0.029943, + " 1.4% Processing: Wallpaper/bigcollection/_A...in_Delhi__Indien____AirPan.jpg\r" + ], + [ + 0.035404, + " 1.5% Processing: Wallpaper/bigcollection/_A...morial_Gardens_in_den_Dand.jpg\r" + ], + [ + 
0.030931, + " 1.6% Processing: Wallpaper/bigcollection/_A...rthumberland__England____A.jpg\r" + ], + [ + 0.035605, + " 1.7% Processing: Wallpaper/bigcollection/_A...berg__Bayern__Deutschland_.jpg\r" + ], + [ + 0.026827, + " 1.8% Processing: Wallpaper/bigcollection/_A...ns_am_Little_Missouri_Rive.jpg\r" + ], + [ + 0.030196, + " 1.9% Processing: Wallpaper/bigcollection/_A...toberfest_in_Munich__Ger 1.jpg\r" + ], + [ + 0.025763, + " 2.0% Processing: Wallpaper/bigcollection/_A...toberfest_in_Munich__Ger 2.jpg\r" + ], + [ + 0.025306, + " 2.1% Processing: Wallpaper/bigcollection/_A...toberfest_in_Munich__Ger 3.jpg\r" + ], + [ + 0.027286, + " 2.2% Processing: Wallpaper/bigcollection/_A...Oktoberfest_in_Munich__Ger.jpg\r" + ], + [ + 0.02806, + " 2.3% Processing: Wallpaper/bigcollection/_A..._Florida-Scheibenanemone__.jpg\r" + ], + [ + 0.032994, + " 2.4% Processing: Wallpaper/bigcollection/_A...n__Nationalpark_Ankarafant.jpg\r" + ], + [ + 0.033538, + " 2.5% Processing: Wallpaper/bigcollection/_A..._der_N_he_von_Page__Arizon.jpg\r" + ], + [ + 0.030034, + " 2.6% Processing: Wallpaper/bigcollection/_A...r_Inselgruppe_L_archipel_d.jpg\r" + ], + [ + 0.030477, + " 2.7% Processing: Wallpaper/bigcollection/_A...land____Hercules_MilasAlam.jpg\r" + ], + [ + 0.033376, + " 2.8% Processing: Wallpaper/bigcollection/_A...maguchiFlickr_OpenGetty_Im.jpg\r" + ], + [ + 0.032919, + " 2.9% Processing: Wallpaper/bigcollection/_A...imetersubmillimeter_Array_.jpg\r" + ], + [ + 0.027034, + " 4.7% Processing: Wallpaper/bigcollection/_B...rairie_Creek_Redwoods_Stat.jpg\r" + ], + [ + 0.034892, + " 4.8% Processing: Wallpaper/bigcollection/_B...__Montana__USA____Jeff_Kro.jpg\r" + ], + [ + 0.031042, + " 4.9% Processing: Wallpaper/bigcollection/_B...gatta__Golf_von_Triest__It.jpg\r" + ], + [ + 0.030521, + " 5.0% Processing: Wallpaper/bigcollection/_B...and__Schleswig-Holstein__D.jpg\r" + ], + [ + 0.028755, + " 5.1% Processing: Wallpaper/bigcollection/_B..._Islands__Irland____Bart_B.jpg\r" + ], + [ + 
0.031129, + " 5.2% Processing: Wallpaper/bigcollection/_B...e_im_Glacier-Nationalpark_.jpg\r" + ], + [ + 0.032588, + " 5.3% Processing: Wallpaper/bigcollection/_B...Nationalpark_Bayerischer_W.jpg\r" + ], + [ + 0.025077, + " 5.4% Processing: Wallpaper/bigcollection/_B...Arena_bei_Nacht__Stockholm.jpg\r" + ], + [ + 0.027803, + " 5.5% Processing: Wallpaper/bigcollection/_B...ner_Fernsehturm_w_hrend_de.jpg\r" + ], + [ + 0.031262, + " 5.6% Processing: Wallpaper/bigcollection/_B...nd__Bayern__Deutschland_mi.jpg\r" + ], + [ + 0.031721, + " 5.7% Processing: Wallpaper/bigcollection/_B...er__Schwarzwald__Baden-W_r.jpg\r" + ], + [ + 0.032768, + " 5.8% Processing: Wallpaper/bigcollection/_B...ebirge_oberhalb_der_Dad_s-.jpg\r" + ], + [ + 0.030763, + " 5.9% Processing: Wallpaper/bigcollection/_B...ngerburgbahn__Innsbruck___.jpg\r" + ], + [ + 0.028673, + " 7.6% Processing: Wallpaper/bigcollection/_B...rn_des__Wilson_Stump___ein.jpg\r" + ], + [ + 0.029182, + " 7.7% Processing: Wallpaper/bigcollection/_B...t_Jefferson-Wildschutzgebi.jpg\r" + ], + [ + 0.029225, + " 11.2% Processing: Wallpaper/bigcollection/_B...Saloum-Delta__Senegal____B.jpg\r" + ], + [ + 0.030837, + " 11.3% Processing: Wallpaper/bigcollection/_B..._Venedig__Italien____Digit.jpg\r" + ], + [ + 0.034033, + " 11.4% Processing: Wallpaper/bigcollection/_B..._Koblenz_und_Trier__Rheinl.jpg\r" + ], + [ + 0.028958, + " 11.5% Processing: Wallpaper/bigcollection/_B..._Baden-W_rttemberg__Deutsc.jpg\r" + ], + [ + 0.025933, + " 11.6% Processing: Wallpaper/bigcollection/_B..._Bisingen__Baden-W_rttembe.jpg\r" + ], + [ + 0.030318, + " 11.7% Processing: Wallpaper/bigcollection/_B...in_Koknese__Lettland____An.jpg\r" + ], + [ + 0.029535, + " 11.8% Processing: Wallpaper/bigcollection/_C...__Deutschland____R_diger_H.jpg\r" + ], + [ + 0.032432, + " 11.9% Processing: Wallpaper/bigcollection/_C...Toulouse__D_partement_Haut.jpg\r" + ], + [ + 0.032966, + " 12.0% Processing: Wallpaper/bigcollection/_C...pring__Germany____Boris_St.jpg\r" + ], + 
[ + 0.024881, + " 12.1% Processing: Wallpaper/bigcollection/_C...pring__Germany____Boris_St.jpg\r" + ], + [ + 0.02818, + " 12.2% Processing: Wallpaper/bigcollection/_C...Mallorca__Balearische_Inse.jpg\r" + ], + [ + 0.029353, + " 12.3% Processing: Wallpaper/bigcollection/_C...A__ESA__N._Smith__Universi.jpg\r" + ], + [ + 0.03626, + " 12.4% Processing: Wallpaper/bigcollection/_C...gebr_cke_bei_Ballintoy__Co.jpg\r" + ], + [ + 0.025838, + " 12.5% Processing: Wallpaper/bigcollection/_C...gebr_cke_bei_Ballintoy__Co.jpg\r" + ], + [ + 0.027176, + " 12.6% Processing: Wallpaper/bigcollection/_C...lona__Spanien____Nora_De_A.jpg\r" + ], + [ + 0.0298, + " 12.7% Processing: Wallpaper/bigcollection/_C...rcia__Nationalpark_Monti_S.jpg\r" + ], + [ + 0.027672, + " 12.8% Processing: Wallpaper/bigcollection/_C...vinz_Potenza__Italien____F.jpg\r" + ], + [ + 0.032259, + " 12.9% Processing: Wallpaper/bigcollection/_C...semite-Nationalpark__Kalif.jpg\r" + ], + [ + 0.031451, + " 13.0% Processing: Wallpaper/bigcollection/_C...um_Ludwig__Cologne__German.jpg\r" + ], + [ + 0.030096, + " 13.1% Processing: Wallpaper/bigcollection/_C..._Ludwig__Cologne__North_ 1.jpg\r" + ], + [ + 0.028235, + " 15.1% Processing: Wallpaper/bigcollection/_D...n_Hannover_bei_Nacht____Ma.jpg\r" + ], + [ + 0.028761, + " 15.2% Processing: Wallpaper/bigcollection/_D...rieb_befindliche_Opernhaus.jpg\r" + ], + [ + 0.027439, + " 15.3% Processing: Wallpaper/bigcollection/_D...esert_View_Watchtower__Gra.jpg\r" + ], + [ + 0.028598, + " 15.4% Processing: Wallpaper/bigcollection/_D..._Provinz_Shaanxi__Volksrep.jpg\r" + ], + [ + 0.031617, + " 15.5% Processing: Wallpaper/bigcollection/_D...gr__t_den_Hund_Sudo_in_Mel.jpg\r" + ], + [ + 0.032865, + " 17.5% Processing: Wallpaper/bigcollection/_D...s_du_Tarn__Nationalpark_Ce.jpg\r" + ], + [ + 0.031736, + " 17.6% Processing: Wallpaper/bigcollection/_D..._he_von_Sens__D_partement_.jpg\r" + ], + [ + 0.030474, + " 17.7% Processing: 
Wallpaper/bigcollection/_D...__Wales__Vereinigtes_K_nig.jpg\r" + ], + [ + 0.026112, + " 20.5% Processing: Wallpaper/bigcollection/_E...Junges_schnuppert_an_einer.jpg\r" + ], + [ + 0.027898, + " 20.6% Processing: Wallpaper/bigcollection/_E...chen_versteckt_sich_in_ein.jpg\r" + ], + [ + 0.027202, + " 20.7% Processing: Wallpaper/bigcollection/_E...r_Frosch_in_einem_Wassertr.jpg\r" + ], + [ + 0.027615, + " 20.8% Processing: Wallpaper/bigcollection/_E...ekorierter_Saguaro-Kaktus_.jpg\r" + ], + [ + 0.028446, + " 20.9% Processing: Wallpaper/bigcollection/_E...e__berquert_den_Luangwa-Fl.jpg\r" + ], + [ + 0.031808, + " 21.0% Processing: Wallpaper/bigcollection/_E...ngstunnel_zur_Felsenkirche.jpg\r" + ], + [ + 0.031065, + " 22.7% Processing: Wallpaper/bigcollection/_E...n_Koblenz_and_Trier__Germa.jpg\r" + ], + [ + 0.033059, + " 22.8% Processing: Wallpaper/bigcollection/_E...n_Angola_und_Namibia____Fr.jpg\r" + ], + [ + 0.035115, + " 22.9% Processing: Wallpaper/bigcollection/_E...r_Olympischen_Spiele_1896_.jpg\r" + ], + [ + 0.032507, + " 23.0% Processing: Wallpaper/bigcollection/_E..._Fr_hlingskrokus__Almwiese.jpg\r" + ], + [ + 0.028219, + " 23.1% Processing: Wallpaper/bigcollection/_E...in_der_Meeresbucht_Cathedr.jpg\r" + ], + [ + 0.029551, + " 23.2% Processing: Wallpaper/bigcollection/_E..._Nationalpark_Bayerischer_.jpg\r" + ], + [ + 0.02746, + " 23.3% Processing: Wallpaper/bigcollection/_E...im_Nationalpark_Bayerische.jpg\r" + ], + [ + 0.028081, + " 23.4% Processing: Wallpaper/bigcollection/_E...im__umava-Nationalpark__Ts.jpg\r" + ], + [ + 0.027796, + " 23.5% Processing: Wallpaper/bigcollection/_E..._Emsland__Germany____Erh 1.jpg\r" + ], + [ + 0.026053, + " 25.4% Processing: Wallpaper/bigcollection/_F...chersee_J_kuls_rl_n__Islan.jpg\r" + ], + [ + 0.029312, + " 25.5% Processing: Wallpaper/bigcollection/_F...Yellowstone_Nationalpark__.jpg\r" + ], + [ + 0.029189, + " 25.6% Processing: Wallpaper/bigcollection/_F...yi__Provinz_Phang-nga__Tha.jpg\r" + ], + [ + 0.029535, + " 
25.7% Processing: Wallpaper/bigcollection/_F..._Tree_River__Kitikmeot_Reg.jpg\r" + ], + [ + 0.031935, + " 25.8% Processing: Wallpaper/bigcollection/_F...ystad__Niederlande____Erns.jpg\r" + ], + [ + 0.034076, + " 25.9% Processing: Wallpaper/bigcollection/_F...kyline_von_Baku__Aserbaids.jpg\r" + ], + [ + 0.028655, + " 26.0% Processing: Wallpaper/bigcollection/_F..._New_York_City__Bundesstaa.jpg\r" + ], + [ + 0.030152, + " 26.1% Processing: Wallpaper/bigcollection/_F...wals__Cierva_Cove__Antarkt.jpg\r" + ], + [ + 0.030983, + " 26.2% Processing: Wallpaper/bigcollection/_F..._des_Norman_River__Queensl.jpg\r" + ], + [ + 0.027019, + " 27.4% Processing: Wallpaper/bigcollection/_G..._Ger_llhang_im_Rondane-Nat.jpg\r" + ], + [ + 0.027058, + " 27.5% Processing: Wallpaper/bigcollection/_G...tzgebiet_Sacramento_Nation.jpg\r" + ], + [ + 0.038515, + " 27.6% Processing: Wallpaper/bigcollection/_G...Villandry__Loiretal__Frank.jpg\r" + ], + [ + 0.024219, + " 27.7% Processing: Wallpaper/bigcollection/_G...Villandry__Loiretal__Frank.jpg\r" + ], + [ + 0.028063, + " 27.8% Processing: Wallpaper/bigcollection/_G...__Champion-Insel__Floreana.jpg\r" + ], + [ + 0.030237, + " 27.9% Processing: Wallpaper/bigcollection/_G...__R_bida__Gal_pagosinseln_.jpg\r" + ], + [ + 0.031455, + " 28.0% Processing: Wallpaper/bigcollection/_G...c-Nationalpark__Alaska__US.jpg\r" + ], + [ + 0.028409, + " 28.1% Processing: Wallpaper/bigcollection/_G...um_Bridge__Newcastle_upon_.jpg\r" + ], + [ + 0.031595, + " 28.2% Processing: Wallpaper/bigcollection/_G..._Kanal_in_Venedig__Italien.jpg\r" + ], + [ + 0.031079, + " 28.3% Processing: Wallpaper/bigcollection/_G..._Rock_Canyon__Waterton-Lak.jpg\r" + ], + [ + 0.028272, + " 30.5% Processing: Wallpaper/bigcollection/_G...oos_in_der_Gro_aufnahme___.jpg\r" + ], + [ + 0.034208, + " 30.6% Processing: Wallpaper/bigcollection/_G...iesel__Bayern__Deutschland.jpg\r" + ], + [ + 0.034016, + " 30.7% Processing: Wallpaper/bigcollection/_G...__ber_dem_Thunersee__Berne.jpg\r" + ], + [ + 
0.0292, + " 30.8% Processing: Wallpaper/bigcollection/_G...ell-St.-Elias-Nationalpark.jpg\r" + ], + [ + 0.024942, + " 32.8% Processing: Wallpaper/bigcollection/_G..._bei_Mettlach__Saarland__D.jpg\r" + ], + [ + 0.031677, + " 32.9% Processing: Wallpaper/bigcollection/_G...ngxia-Zuchtstation__Ya_an_.jpg\r" + ], + [ + 0.031108, + " 33.9% Processing: Wallpaper/bigcollection/_H...kaido__Japan____JTB_Media_.jpg\r" + ], + [ + 0.030964, + " 35.9% Processing: Wallpaper/bigcollection/_H...ew_RussellVisuals_Unlimite.jpg\r" + ], + [ + 0.026577, + " 38.9% Processing: Wallpaper/bigcollection/_I...eutschen_Doms__Gendarmenma.jpg\r" + ], + [ + 0.031898, + " 39.0% Processing: Wallpaper/bigcollection/_I...ukuoka_Tower__Fukuoka__Jap.jpg\r" + ], + [ + 0.031693, + " 39.1% Processing: Wallpaper/bigcollection/_I...__Bermeo__Provinz_Bizkaia_.jpg\r" + ], + [ + 0.026825, + " 39.2% Processing: Wallpaper/bigcollection/_I...P_tzcuaro-See__Bundesstaat.jpg\r" + ], + [ + 0.030749, + " 41.0% Processing: Wallpaper/bigcollection/_J...ia-Nationalpark__Maine__US.jpg\r" + ], + [ + 0.032301, + " 41.1% Processing: Wallpaper/bigcollection/_J..._im_Moremi_Game_Reserve__O.jpg\r" + ], + [ + 0.031689, + " 42.2% Processing: Wallpaper/bigcollection/_K...n_in_der_Antarktis____Jan_.jpg\r" + ], + [ + 0.029222, + " 42.3% Processing: Wallpaper/bigcollection/_K...e_Washington__Antarktis___.jpg\r" + ], + [ + 0.174039, + " 42.4% Processing: Wallpaper/bigcollection/_K...K_ken__Snow_Hill_Island__A.jpg\r" + ], + [ + 0.03322, + " 42.5% Processing: Wallpaper/bigcollection/_K...SCO-Welterbest_tte__Trier_.jpg\r" + ], + [ + 0.031657, + " 43.4% Processing: Wallpaper/bigcollection/_K...ufort__South_Carolina__USA.jpg\r" + ], + [ + 0.026738, + " 43.5% Processing: Wallpaper/bigcollection/_K...chfang_vor_Port_St._Johns_.jpg\r" + ], + [ + 0.033834, + " 44.4% Processing: Wallpaper/bigcollection/_K...eide__Schottisches_Hochlan.jpg\r" + ], + [ + 0.034061, + " 44.5% Processing: Wallpaper/bigcollection/_K...m_Schlossgarten_Schwetzing.jpg\r" 
+ ], + [ + 0.033845, + " 44.6% Processing: Wallpaper/bigcollection/_K...dscha__Kachetien__Georgien.jpg\r" + ], + [ + 0.031383, + " 44.7% Processing: Wallpaper/bigcollection/_K..._Baden-W_rttemberg__Deutsc.jpg\r" + ], + [ + 0.027515, + " 44.8% Processing: Wallpaper/bigcollection/_K..._Zanskar__Region_Ladakh__B.jpg\r" + ], + [ + 0.031935, + " 44.9% Processing: Wallpaper/bigcollection/_K...Meteora__Griechenland____S.jpg\r" + ], + [ + 0.030994, + " 45.0% Processing: Wallpaper/bigcollection/_K...-Ville__Belgien____Patty_P.jpg\r" + ], + [ + 0.031632, + " 46.8% Processing: Wallpaper/bigcollection/_L...ionalpark__Simbabwe____Jer.jpg\r" + ], + [ + 0.032645, + " 46.9% Processing: Wallpaper/bigcollection/_L...Hochland_von_Cuenca__Auton.jpg\r" + ], + [ + 0.028682, + " 47.0% Processing: Wallpaper/bigcollection/_L...Hochland_von_Cuenca__Auton.jpg\r" + ], + [ + 0.030087, + " 47.1% Processing: Wallpaper/bigcollection/_L...__Axel_Flasbarth500px_____.jpg\r" + ], + [ + 0.030684, + " 47.2% Processing: Wallpaper/bigcollection/_L...athedrale_von_Chartres__Fr.jpg\r" + ], + [ + 0.029522, + " 47.3% Processing: Wallpaper/bigcollection/_L...und_Aiguilles_de_Chamonix_.jpg\r" + ], + [ + 0.032174, + " 47.4% Processing: Wallpaper/bigcollection/_L...Nutthavood_Punpeng500px___.jpg\r" + ], + [ + 0.029075, + " 47.5% Processing: Wallpaper/bigcollection/_L...nd__Great_Barrier_Reef__Au.jpg\r" + ], + [ + 0.028973, + " 47.6% Processing: Wallpaper/bigcollection/_L...__Insel_Corvo__Portugal___.jpg\r" + ], + [ + 0.030047, + " 47.7% Processing: Wallpaper/bigcollection/_L...ationalpark__British_Colum.jpg\r" + ], + [ + 0.031497, + " 49.3% Processing: Wallpaper/bigcollection/_L...hof__Great_Court__des_Brit.jpg\r" + ], + [ + 0.029466, + " 49.4% Processing: Wallpaper/bigcollection/_L...em_Wald_auf_der_Insel_Shik.jpg\r" + ], + [ + 0.025178, + " 49.5% Processing: Wallpaper/bigcollection/_L...er_K_ste_von_Ixtapa_Zihuat.jpg\r" + ], + [ + 0.030228, + " 49.6% Processing: 
Wallpaper/bigcollection/_L...e_Itapu__in_Salvador__Bahi.jpg\r" + ], + [ + 0.027644, + " 49.7% Processing: Wallpaper/bigcollection/_L...l_Point_in_der_N_he_von_Po.jpg\r" + ], + [ + 0.026513, + " 49.8% Processing: Wallpaper/bigcollection/_L...eversand__Westerhever__Sch.jpg\r" + ], + [ + 0.032316, + " 49.9% Processing: Wallpaper/bigcollection/_L...i__Provinz_Jiangsu__Volksr.jpg\r" + ], + [ + 0.026983, + " 50.0% Processing: Wallpaper/bigcollection/_L...g__aufgenommen_von_der_Int.jpg\r" + ], + [ + 0.03107, + " 51.7% Processing: Wallpaper/bigcollection/_M..._Cay__Exuma__Bahamas____Ji.jpg\r" + ], + [ + 0.028123, + " 51.8% Processing: Wallpaper/bigcollection/_M...ationalpark_Jardines_de_la.jpg\r" + ], + [ + 0.028547, + " 51.9% Processing: Wallpaper/bigcollection/_M...au____WaterFrameAlamy_____.jpg\r" + ], + [ + 0.030092, + " 53.1% Processing: Wallpaper/bigcollection/_M...ands-Nationalpark__Utah__U.jpg\r" + ], + [ + 0.027589, + " 53.2% Processing: Wallpaper/bigcollection/_M...useum_in_den_Wolken__Monte.jpg\r" + ], + [ + 0.029779, + " 53.3% Processing: Wallpaper/bigcollection/_M...Plaza_de_la_Encarnaci_n__S.jpg\r" + ], + [ + 0.031154, + " 54.6% Processing: Wallpaper/bigcollection/_M...lmie_National_Forest__Bund.jpg\r" + ], + [ + 0.03317, + " 54.7% Processing: Wallpaper/bigcollection/_M...t_Edziza_Provincial_Park__.jpg\r" + ], + [ + 0.031631, + " 54.8% Processing: Wallpaper/bigcollection/_M...__Washington__USA____Diane.jpg\r" + ], + [ + 0.025722, + " 56.1% Processing: Wallpaper/bigcollection/_N..._K_ste_des_Atlantischen_Oz.jpg\r" + ], + [ + 0.029888, + " 56.2% Processing: Wallpaper/bigcollection/_N...hee__Schiras__Iran____R.Cr.jpg\r" + ], + [ + 0.022761, + " 57.5% Processing: Wallpaper/bigcollection/_N...Fischotter_im_Yellowstone-.jpg\r" + ], + [ + 0.030469, + " 57.6% Processing: Wallpaper/bigcollection/_N..._Baumstachler____Minden_Pi.jpg\r" + ], + [ + 0.032258, + " 58.9% Processing: Wallpaper/bigcollection/_O...-Park__Bomarzo__Italien___.jpg\r" + ], + [ + 0.028556, + " 59.0% 
Processing: Wallpaper/bigcollection/_O...-Park__Bomarzo__Italien___.jpg\r" + ], + [ + 0.029665, + " 60.4% Processing: Wallpaper/bigcollection/_P..._der_Boardman_Tree_Farm__B.jpg\r" + ], + [ + 0.030072, + " 60.5% Processing: Wallpaper/bigcollection/_P...o-Ebene__Italien____Eddy_G.jpg\r" + ], + [ + 0.034601, + " 60.6% Processing: Wallpaper/bigcollection/_P...nem_Karnevalswagen_beim_Ro.jpg\r" + ], + [ + 0.029305, + " 61.9% Processing: Wallpaper/bigcollection/_P...der_argentinischen_Atlanti.jpg\r" + ], + [ + 0.03045, + " 62.0% Processing: Wallpaper/bigcollection/_P...m__Pilsum__Niedersachsen__.jpg\r" + ], + [ + 0.02941, + " 63.4% Processing: Wallpaper/bigcollection/_P...rk_Torres_del_Paine__Chile.jpg\r" + ], + [ + 0.033345, + " 63.5% Processing: Wallpaper/bigcollection/_P...i-Nationalpark__New_South_.jpg\r" + ], + [ + 0.031818, + " 64.9% Processing: Wallpaper/bigcollection/_R...ationalpark_Sarek__Schwede.jpg\r" + ], + [ + 0.025656, + " 65.0% Processing: Wallpaper/bigcollection/_R...ationalpark_Sarek__Schwede.jpg\r" + ], + [ + 0.030751, + " 66.6% Processing: Wallpaper/bigcollection/_R...nyang__Provinz_Yunnan__Chi.jpg\r" + ], + [ + 0.030313, + " 66.7% Processing: Wallpaper/bigcollection/_R...n_Ludwig_XIV._auf_dem_Plac.jpg\r" + ], + [ + 0.032915, + " 68.6% Processing: Wallpaper/bigcollection/_R...r____Getty_Images______Bin.jpg\r" + ], + [ + 0.029504, + " 70.1% Processing: Wallpaper/bigcollection/_S...tional_Park__Germany____ 3.jpg\r" + ], + [ + 0.026571, + " 70.2% Processing: Wallpaper/bigcollection/_S...tional_Park__Germany____ 4.jpg\r" + ], + [ + 0.032136, + " 71.7% Processing: Wallpaper/bigcollection/_S...e_t_sich_als_Wasserfall_vo.jpg\r" + ], + [ + 0.032883, + " 72.6% Processing: Wallpaper/bigcollection/_S...riehunde_im_Wind_Cave_Nati.jpg\r" + ], + [ + 0.031602, + " 72.7% Processing: Wallpaper/bigcollection/_S...erkstatt__Hexenlochm_hle__.jpg\r" + ], + [ + 0.030634, + " 73.6% Processing: Wallpaper/bigcollection/_S...en_in_der_Son_Doong-H_hle_.jpg\r" + ], + [ + 
0.027026, + " 74.5% Processing: Wallpaper/bigcollection/_S..._at_sunset__Attendorn__Sau.jpg\r" + ], + [ + 0.038777, + " 75.4% Processing: Wallpaper/bigcollection/_S..._Dartmoor-Nationalpark__De.jpg\r" + ], + [ + 0.027422, + " 75.5% Processing: Wallpaper/bigcollection/_S..._der_Halong-Bucht__Vietnam.jpg\r" + ], + [ + 0.027539, + " 76.3% Processing: Wallpaper/bigcollection/_S...em_See__Bergpark_Wilhelmsh.jpg\r" + ], + [ + 0.031058, + " 76.4% Processing: Wallpaper/bigcollection/_S...ge_in_den_Ausl_ufern_der_R.jpg\r" + ], + + [ + 0.036506, + " 77.6% Processing: Wallpaper/bigcollection/_S..._Geothermalgebiet_Haukadal.jpg\r" + ], + [ + 0.025063, + " 77.7% Processing: Wallpaper/bigcollection/_S...ampagne-Ardennes__Frankrei.jpg\r" + ], + [ + 0.029054, + " 77.8% Processing: Wallpaper/bigcollection/_S...r__ber_West_Point__Nebrask.jpg\r" + ], + [ + 0.028908, + " 77.9% Processing: Wallpaper/bigcollection/_S...n-Bodenstation__Longyearby.jpg\r" + ], + [ + 0.029276, + " 78.0% Processing: Wallpaper/bigcollection/_S..._und_Solidarit_t_Kerzen__K.jpg\r" + ], + [ + 0.024812, + " 79.0% Processing: Wallpaper/bigcollection/_T..._Blatt_eines_Per_ckenstrau.jpg\r" + ], + [ + 0.031898, + " 80.0% Processing: Wallpaper/bigcollection/_T...er_Bavaria__Germany____F 3.jpg\r" + ], + [ + 0.029189, + " 80.1% Processing: Wallpaper/bigcollection/_T...pper_Bavaria__Germany____F.jpg\r" + ], + [ + 0.028065, + " 81.2% Processing: Wallpaper/bigcollection/_T...Image_BrokerRex_Features 1.jpg\r" + ], + [ + 0.03116, + " 81.3% Processing: Wallpaper/bigcollection/_T...n__Baden-W_rttemberg__Deut.jpg\r" + ], + [ + 0.026524, + " 82.3% Processing: Wallpaper/bigcollection/_U..._seltene_Blattschwanzgecko.jpg\r" + ], + [ + 0.028383, + " 82.4% Processing: Wallpaper/bigcollection/_V...en_und_Altstadt_von_Chania.jpg\r" + ], + [ + 0.032476, + " 83.5% Processing: Wallpaper/bigcollection/_V..._Hirta__St._Kilda__Schottl.jpg\r" + ], + [ + 0.030701, + " 84.7% Processing: 
Wallpaper/bigcollection/_W...wald__Insel_Sula__Solund__.jpg\r" + ], + [ + 0.034129, + " 84.8% Processing: Wallpaper/bigcollection/_W...nschafe__Kanton_Wallis__ 1.jpg\r" + ], + [ + 0.03033, + " 85.8% Processing: Wallpaper/bigcollection/_W...ionalpark_Plitvicer_Seen__.jpg\r" + ], + + [ + 0.031761, + " 87.2% Processing: Wallpaper/bigcollection/_W..._N_he_von_Ca_amares__Provi.jpg\r" + ], + [ + 0.031627, + " 87.3% Processing: Wallpaper/bigcollection/_W..._N_he_von_Cuenca__Spanien_.jpg\r" + ], + [ + 0.024242, + " 88.3% Processing: Wallpaper/bigcollection/_W...jeu__D_partement_Rh_ne__Re.jpg\r" + ], + [ + 0.027362, + " 89.3% Processing: Wallpaper/bigcollection/_W...guna_Colorada__Bolivien___.jpg\r" + ], + [ + 0.031448, + " 90.5% Processing: Wallpaper/bigcollection/_Z..._von_Autobahnen_in_Bangkok.jpg\r" + ], + [ + 0.027535, + " 90.6% Processing: Wallpaper/bigcollection/_Z...abara-Bucht__Rio_de_Janeir.jpg\r" + ], + [ + 0.025329, + " 92.1% Processing: Wallpaper/bigcollection/__...ptur_der_Landart-K_nstleri.jpg\r" + ], + [ + 0.044106, + " 92.2% Processing: Wallpaper/bigcollection/__...__Magic_Mountain_-Landmark.jpg\r" + ], + [ + 0.03068, + " 93.5% Processing: Wallpaper/bigcollection/_F...rte_Marina_Bay_zum_50._Nat.jpg\r" + ], + [ + 0.031039, + " 93.6% Processing: Wallpaper/bigcollection/_H...ing_Crane_Pond_Conservancy.jpg\r" + ], + [ + 0.020685, + " 95.0% Processing: Wallpaper/2048example/Palo...t_by_Beth___Jeremy_Jonkman.jpg\r" + ], + [ + 0.019863, + " 96.3% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" + ], + [ + 0.056069, + " 96.4% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" + ], + [ + 0.049869, + " 97.4% Processing: Wallpaper/evenmore/ChipDE 06.jpg \r" + ], + [ + 0.021021, + " 97.5% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" + ], + [ + 0.019135, + " 98.4% Processing: Wallpaper/evenmore/ChipDE ...jpg \r" + ], + [ + 0.021483, + " 99.6% Processing: Wallpaper/deer.jpg ... \r" + ], + [ + 0.021593, + " 99.7% Processing: Wallpaper/deer.jpg ... 
\r" + ], + [ + 0.02037, + " 99.8% Processing: Wallpaper/deer.jpg ... \r" + ], + [ + 0.027858, + " 99.9% Processing: Wallpaper/deer.jpg ... \r" + ], + [ + 0.020864, + " \r" + ], + [ + 0.077955, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001068, + "\u001b]7;\u0007" + ], + [ + 0.000836, + "\u001b]7;\u0007" + ], + [ + 0.000104, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000234, + "\u001b[?2004h" + ], + [ + 2.471911, + "\u001b[32ml\u001b[39m" + ], + [ + 0.102688, + "\b\u001b[32ml\u001b[32ms\u001b[39m" + ], + [ + 0.272296, + " " + ], + [ + 0.220114, + "-" + ], + [ + 0.157165, + "l" + ], + [ + 0.074368, + "a" + ], + [ + 0.353976, + "\u001b[?1l\u001b>" + ], + [ + 0.000755, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000778, + "\u001b]2;ls --color=tty -la\u0007\u001b]1;ls\u0007" + ], + [ + 0.001633, + "total 573616\r\n" + ], + [ + 1.9e-05, + "drwxr-xr-x. 4 rugk rugk 4096 Jul 16 18:56 \u001b[0m\u001b[38;5;33m.\u001b[0m\r\ndrwxr-x---. 55 rugk rugk 4096 Jul 16 18:57 \u001b[38;5;33m..\u001b[0m\r\ndrwx------. 2 rugk rugk 4096 Jul 14 21:57 \u001b[38;5;33mWallpaper\u001b[0m\r\ndrwxr-xr-x. 6 rugk rugk 4096 Jul 14 21:55 \u001b[38;5;33mWallpaper.orig\u001b[0m\r\n-rw-------. 
1 rugk rugk 587361454 Jul 16 18:57 \u001b[38;5;9mbackup.tar.gz\u001b[0m\r\n" + ], + [ + 0.000404, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001103, + "\u001b]7;\u0007" + ], + [ + 0.000992, + "\u001b]7;\u0007" + ], + [ + 7.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000158, + "\u001b[?2004h" + ], + [ + 3.04506, + "\u001b[?1l\u001b>" + ], + [ + 0.000385, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000485, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001512, + "\u001b]7;\u0007" + ], + [ + 0.001245, + "\u001b]7;\u0007" + ], + [ + 6.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000129, + "\u001b[?1h\u001b=" + ], + [ + 0.000247, + "\u001b[?2004h" + ], + [ + 0.325892, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.228892, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.186392, + "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" + ], + [ + 0.112073, + "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.139024, + "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.151793, + "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.106484, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.147932, + "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.181458, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.137456, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.21885, + "\b\u001b[1m \u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.170788, + "\b\u001b[1me\u001b[1mv\u001b[0m\u001b[39m" + ], + [ + 0.133285, + "\b\u001b[1mv\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.28717, + "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.485291, + "\b\u001b[1mn\u001b[1m mount an archive or even the whole repository:\u001b[0m\u001b[39m" + ], + [ + 1.036008, + "\u001b[?1l\u001b>" + ], + [ + 0.001535, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001777, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.002934, + "\u001b]7;\u0007" + ], + [ + 0.002695, + "\u001b]7;\u0007" + ], + [ + 0.00014, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 9.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000881, + "\u001b[?2004h" + ], + [ + 1.264493, + "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m" + ], + [ + 0.158178, + "\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[0m\u001b[39m" + ], + [ + 0.129344, + "\b\b\u001b[1m\u001b[31mm\u001b[1m\u001b[31mk\u001b[1m\u001b[31md\u001b[0m\u001b[39m" + ], + [ + 0.153746, + "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" + ], + [ + 0.106254, + "\b\b\b\b\u001b[0m\u001b[32mm\u001b[0m\u001b[32mk\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[32mr\u001b[39m" + ], + [ + 0.178794, + " " + ], + [ + 0.328222, + "\u001b[4m/\u001b[24m" + ], + [ + 0.202794, + "\b\u001b[4m/\u001b[4mt\u001b[24m" + ], + [ + 0.246443, + "\b\u001b[4mt\u001b[4mm\u001b[24m" + ], + [ + 0.207634, + "\b\u001b[4mm\u001b[4mp\u001b[24m" + ], + [ + 0.88273, + "\b\u001b[4mp\u001b[4m/\u001b[24m" + ], + [ + 0.339887, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.210076, + "\b\u001b[4mm\u001b[4mo\u001b[24m" + ], + [ + 0.16667, + "\b\b\b\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp\u001b[24m/\u001b[24mm\u001b[24mou" + ], + [ + 0.141564, + "n" + ], + [ + 0.184, + "t" + ], + [ + 1.4607, + "\u001b[?1l\u001b>" + ], + [ + 0.001306, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000794, + "\u001b]2;mkdir /tmp/mount\u0007" + ], + [ + 6.6e-05, + "\u001b]1;mkdir\u0007" + ], + [ + 0.00176, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00142, + "\u001b]7;\u0007" + ], + [ + 0.001308, + "\u001b]7;\u0007" + ], + [ + 7.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " + ], + [ + 1.3e-05, + "\u001b[K" + ], + [ + 9.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000273, + "\u001b[?2004h" + ], + [ + 1.09686, + "\u001b[4mb\u001b[24m" + ], + [ + 0.187046, + 
"\b\u001b[24m\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.10907, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.12414, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.187573, + " " + ], + [ + 0.229364, + "m" + ], + [ + 0.195942, + "o" + ], + [ + 0.183861, + "u" + ], + [ + 0.138559, + "n" + ], + [ + 0.207537, + "t" + ], + [ + 1.01571, + " " + ], + [ + 0.55086, + ":" + ], + [ + 0.110713, + ":" + ], + [ + 0.27265, + " " + ], + [ + 0.462869, + "\u001b[4m/\u001b[24m" + ], + [ + 0.795464, + "\b\u001b[4m/\u001b[4mt\u001b[24m" + ], + [ + 0.295092, + "\b\u001b[4mt\u001b[4mm\u001b[24m" + ], + [ + 0.200509, + "\b\u001b[4mm\u001b[4mp\u001b[24m" + ], + [ + 0.878464, + "\b\u001b[4mp\u001b[4m/\u001b[24m" + ], + [ + 0.306666, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.24341, + "\b\u001b[4mm\u001b[4mo\u001b[24m" + ], + [ + 0.166203, + "\b\u001b[4mo\u001b[4mu\u001b[24m" + ], + [ + 0.138953, + "\b\u001b[4mu\u001b[4mn\u001b[24m" + ], + [ + 0.177723, + "\b\u001b[4mn\u001b[4mt\u001b[24m" + ], + [ + 1.371278, + "\u001b[?1l\u001b>" + ], + [ + 0.001184, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000603, + "\u001b]2;borg mount :: /tmp/mount\u0007\u001b]1;borg\u0007" + ], + [ + 0.651025, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001453, + "\u001b]7;\u0007" + ], + [ + 0.000984, + "\u001b]7;\u0007" + ], + [ + 7.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 2.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.0002, + "\u001b[?2004h" + ], + [ + 1.860515, + "\u001b[32ml\u001b[39m" + ], + [ + 0.107896, + "\b\u001b[32ml\u001b[32ms\u001b[39m" + ], + [ + 0.253911, + " " + ], + [ + 0.203092, + "-" + ], + [ + 0.178525, + "l" + ], + [ + 0.111795, + "a" + ], + [ + 0.200138, + " " + ], + [ + 0.353001, + "\u001b[4m/\u001b[24m" + ], + [ + 0.264827, + "\b\u001b[4m/\u001b[4mt\u001b[24m" + ], + [ + 0.205749, + 
"\b\u001b[4mt\u001b[4mm\u001b[24m" + ], + [ + 0.168679, + "\b\u001b[4mm\u001b[4mp\u001b[24m" + ], + [ + 0.016649, + "\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp" + ], + [ + 0.712108, + "\b\b\b\b\u001b[4m/\u001b[4mt\u001b[4mm\u001b[4mp\u001b[24m \b" + ], + [ + 0.383057, + "\b\u001b[4mp\u001b[4m/\u001b[24m" + ], + [ + 0.159994, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.187645, + "\b\u001b[4mm\u001b[4mo\u001b[24m" + ], + [ + 0.168813, + "\b\u001b[4mo\u001b[4mu\u001b[24m" + ], + [ + 0.12933, + "\b\u001b[4mu\u001b[4mn\u001b[24m" + ], + [ + 0.421583, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.018359, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mls\u001b[39m -la \u001b[4m/tmp/mount\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.602087, + "\b\b\u001b[4mt\u001b[24m\u001b[0m\u001b[24m \b" + ], + [ + 2.5e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.000874, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000682, + "\u001b]2;ls --color=tty -la /tmp/mount\u0007\u001b]1;ls\u0007" + ], + [ + 0.002495, + "total 0\r\n" + ], + [ + 4.4e-05, + "drwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[0m\u001b[38;5;33m.\u001b[0m\r\ndrwxrwxrwt. 27 root root 660 Jul 16 18:58 \u001b[48;5;10;38;5;16m..\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup-block-device\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup1\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup2\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mbackup3\u001b[0m\r\ndrwxr-xr-x. 1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mrugk-2017-07-16T18:51:34\u001b[0m\r\ndrwxr-xr-x. 
1 rugk rugk 0 Jul 16 18:58 \u001b[38;5;33mrugk-2017-07-16T18:52:19\u001b[0m\r\n" + ], + [ + 0.000169, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000984, + "\u001b]7;\u0007" + ], + [ + 0.00097, + "\u001b]7;\u0007" + ], + [ + 2.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000122, + "\u001b[?1h\u001b=" + ], + [ + 0.000251, + "\u001b[?2004h" + ], + [ + 0.482339, + "\u001b[4mb\u001b[24m" + ], + [ + 0.179808, + "\b\u001b[24m\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.105817, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.116974, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.173004, + " " + ], + [ + 0.216291, + "u" + ], + [ + 0.168732, + "m" + ], + [ + 0.228004, + "o" + ], + [ + 0.19268, + "u" + ], + [ + 0.14303, + "n" + ], + [ + 0.175557, + "t" + ], + [ + 0.406596, + " " + ], + [ + 0.69215, + "\u001b[4m/\u001b[24m" + ], + [ + 0.242216, + "\b\u001b[4m/\u001b[4mt\u001b[24m" + ], + [ + 0.260453, + "\b\u001b[4mt\u001b[4mm\u001b[24m" + ], + [ + 0.2605, + "\b\u001b[4mm\u001b[4mp\u001b[24m" + ], + [ + 0.014483, + "\b\b\b\b\u001b[24m/\u001b[24mt\u001b[24mm\u001b[24mp" + ], + [ + 0.597766, + "\b\b\b\b\u001b[4m/\u001b[4mt\u001b[4mm\u001b[4mp\u001b[24m \b" + ], + [ + 0.482551, + "\b\u001b[4mp\u001b[4m/\u001b[24m" + ], + [ + 0.236361, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.212317, + "\b\u001b[4mm\u001b[4mo\u001b[24m" + ], + [ + 0.160611, + "\b\u001b[4mo\u001b[4mu\u001b[24m" + ], + [ + 0.142036, + "\b\u001b[4mu\u001b[4mn\u001b[24m" + ], + [ + 0.335664, + "\b\u001b[4mn\u001b[4mt\u001b[24m" + ], + [ + 1.159614, + "\u001b[?1l\u001b>" + ], + [ + 0.001057, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000642, + "\u001b]2;borg umount /tmp/mount\u0007\u001b]1;borg\u0007" + ], + [ + 0.596849, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.001387, + "\u001b]7;\u0007" + 
], + [ + 0.001067, + "\u001b]7;\u0007" + ], + [ + 9.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000187, + "\u001b[?2004h" + ], + [ + 1.467084, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.264583, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.277487, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.12184, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.103403, + "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.125651, + "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.465663, + "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.298764, + "\b\u001b[1m'\u001b[1ms it, but of course there is more to explore, so have a look at the d\u001b[1mo\u001b[1mcs.\u001b[0m\u001b[39m\u001b[K\r\r\n\u001b[K\u001b[A\u001b[4C" + ], + [ + 1.453815, + "\u001b[1B\r\u001b[K\u001b[A\u001b[4C" + ], + [ + 2e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.000725, + "\u001b[?2004l\u001b[1B\r\r\n" + ], + [ + 0.00054, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00118, + "\u001b]7;\u0007" + ], + [ + 0.000909, + "\u001b]7;\u0007" + ], + [ + 7.8e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000524, + "\u001b[?2004h" + ], + [ + 1.74407, + "\u001b[?2004l\r\r\n" + ] + ] +} diff --git a/docs/misc/asciinema/advanced.sh b/docs/misc/asciinema/advanced.sh new file mode 100644 index 0000000000..e09d7bcb0c --- /dev/null +++ b/docs/misc/asciinema/advanced.sh @@ -0,0 +1,65 @@ +# For the pro users, here are some advanced features of borg, so you can impress your friends. ;) +# Note: This screencast was made with borg version 1.1.0 – older or newer borg versions may behave differently. + +# First of all, we can use several environment variables for borg. +# E.g. 
we do not want to type in our repo path and password again and again… +export BORG_REPO='/media/backup/borgdemo' +export BORG_PASSPHRASE='1234' +# Problem solved, borg will use this automatically… :) +# We'll use this right away… + +## ADVANCED CREATION ## + +# We can also use some placeholders in our archive name… +borg create --stats --progress --compression lz4 ::{user}-{now} Wallpaper +# Notice the backup name. + +# And we can put completely different data, with different backup settings, in our backup. It will be deduplicated, anyway: +borg create --stats --progress --compression zlib,6 --exclude ~/Downloads/big ::{user}-{now} ~/Downloads + +# Or let's backup a device via STDIN. +sudo dd if=/dev/loop0 bs=10M | borg create --progress --stats ::specialbackup - + +# Let's continue with some simple things: +## USEFUL COMMANDS ## +# You can show some information about an archive. You can even do it without needing to specify the archive name: +borg info :: --last 1 + +# So let's rename our last archive: +borg rename ::specialbackup backup-block-device + +borg info :: --last 1 + +# A very important step if you choose keyfile mode (where the keyfile is only saved locally) is to export your keyfile and possibly print it, etc. +borg key export :: --qr-code file.html # this creates a nice HTML, but when you want something simpler… +< remove comment > +< let there: borg check > --paper # this is a "manual input"-only backup (but it is also included in the --qr-code option) + +## MAINTENANCE ## +# Sometimes backups get broken or we want a regular "checkup" that everything is okay… +borg check -v :: + +# Next problem: Usually you do not have infinite disk space. So you may need to prune your archive… +# You can tune this in every detail. See the docs for details. Here only a simple example: +borg prune --list --keep-last 1 --dry-run +# When actually executing it in a script, you have to use it without the --dry-run option, of course. 
+ +## RESTORE ## + +# When you want to see the diff between two archives use this command. +# E.g. what happened between the first two backups? +borg diff ::backup1 backup2 +# Ah, we added a file, right… + +# There are also other ways to extract the data. +# E.g. as a tar archive. +borg export-tar --progress ::backup2 backup.tar.gz +ls -l + +# You can mount an archive or even the whole repository: +mkdir /tmp/mount +borg mount :: /tmp/mount +ls -la /tmp/mount +borg umount /tmp/mount + +# That's it, but of course there is more to explore, so have a look at the docs. diff --git a/docs/misc/asciinema/basic.json b/docs/misc/asciinema/basic.json new file mode 100644 index 0000000000..9eaf89356b --- /dev/null +++ b/docs/misc/asciinema/basic.json @@ -0,0 +1,4862 @@ +{ + "version": 1, + "width": 78, + "height": 25, + "duration": 379.234504, + "command": null, + "title": null, + "env": { + "TERM": "xterm-256color", + "SHELL": "/bin/zsh" + }, + "stdout": [ + [ + 0.000155, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000133, + "\u001b[?1h\u001b=" + ], + [ + 0.000183, + "\u001b[?2004h" + ], + [ + 0.468833, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.413214, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.440799, + "\b\b\u001b[1m#\u001b[1m \u001b[1mH\u001b[0m\u001b[39m" + ], + [ + 0.155436, + "\b\u001b[1mH\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.153888, + "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.145046, + "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.191005, + "\b\u001b[1me\u001b[1m you'll see some basic commands to start working with borg.\u001b[0m\u001b[39m" + ], + [ + 0.328571, + "\u001b[?1l\u001b>" + ], + [ + 0.000462, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000787, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 7.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 1.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.0003, + "\u001b[?2004h" + ], + [ + 0.553943, + 
"\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.254153, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.205346, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.164037, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.198817, + "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.157487, + "\b\u001b[1mt\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.348855, + "\b\u001b[1me\u001b[1m:\u001b[0m\u001b[39m" + ], + [ + 0.308837, + "\b\u001b[1m:\u001b[1m This teaser screencast was made with borg version 1.1.0 – older or n\u001b[1me\u001b[1mwer borg versions may behave differently.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 0.760183, + "\u001b[?1l\u001b>" + ], + [ + 0.001229, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001043, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000111, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000699, + "\u001b[?2004h" + ], + [ + 0.617302, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.269944, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.231147, + "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" + ], + [ + 0.157768, + "\b\u001b[1mB\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.145012, + "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.360132, + "\b\u001b[1mt\u001b[1m let's start.\u001b[0m\u001b[39m" + ], + [ + 0.808076, + "\u001b[?1l\u001b>" + ], + [ + 0.000384, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001063, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000271, + "\u001b[?2004h" + ], + [ + 1.213811, + "\u001b[?1l\u001b>" + ], + [ + 0.000271, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001041, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 7e-06, + "\u001b]1;~/Pictures\u0007" + ], + [ + 4.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], 
+ [ + 5.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000167, + "\u001b[?2004h" + ], + [ + 0.326924, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.245919, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.173421, + "\b\b\u001b[1m#\u001b[1m \u001b[1mF\u001b[0m\u001b[39m" + ], + [ + 0.121947, + "\b\u001b[1mF\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.196316, + "\b\u001b[1mi\u001b[1mr\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.224037, + "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.323925, + "\b\u001b[1mt\u001b[1m of all, you can always get help:\u001b[0m\u001b[39m" + ], + [ + 0.738987, + "\u001b[?1l\u001b>" + ], + [ + 0.000395, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000643, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000107, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 3.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00031, + "\u001b[?2004h" + ], + [ + 1.268663, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.19562, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.100091, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.157538, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.196595, + " " + ], + [ + 0.210071, + "h" + ], + [ + 0.124892, + "e" + ], + [ + 0.177906, + "l" + ], + [ + 0.121006, + "p" + ], + [ + 0.314487, + "\u001b[?1l\u001b>" + ], + [ + 0.000695, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000777, + "\u001b]2;borg help\u0007\u001b]1;borg\u0007" + ], + [ + 0.538908, + "usage: borg [-V] [-h] [--critical] [--error] [--warning] [--info] [--debug]\r\n [--debug-topic TOPIC] [-p] [--log-json] [--lock-wait N]\r\n [--show-version] [--show-rc] [--no-files-cache] [--umask M]\r\n [--remote-path PATH] [--remote-ratelimit rate]\r\n [--consider-part-files] [--debug-profile FILE]\r\n ...\r\n\r\nBorg - Deduplicated Backups\r\n\r\noptional arguments:\r\n -V, --version show 
version number and exit\r\n\r\nCommon options:\r\n -h, --help show this help message and exit\r\n --critical work on log level CRITICAL\r\n --error work on log level ERROR\r\n --warning work on log level WARNING (default)\r\n --info, -v, --verbose\r\n work on log level INFO\r\n --debug enable debug output, work on log level DEBUG\r\n --debug-topic TOPIC enable TOPIC debugging (can be specified multiple\r\n times). The logger path is borg.debug. if TOPIC\r\n " + ], + [ + 4.1e-05, + " is not fully qualified.\r\n -p, --progress show progress information\r\n --log-json Output one JSON object per log line instead of\r\n formatted text.\r\n --lock-wait N wait for the lock, but max. N seconds (default: 1).\r\n --show-version show/log the borg version\r\n --show-rc show/log the return code (rc)\r\n --no-files-cache do not load/update the file metadata cache used to\r\n detect unchanged files\r\n --umask M set umask to M (local and remote, default: 0077)\r\n --remote-path PATH use PATH as borg executable on the remote (default:\r\n \"borg\")\r\n --remote-ratelimit rate\r\n set remote network upload rate limit in kiByte/s\r\n (default: 0=unlimited)\r\n --consider-part-files\r\n treat part files like normal files (e.g. to\r\n list/extract them)\r\n --debug-profile FILE Write execution profile" + ], + [ + 1.6e-05, + " in Borg format into FILE. 
For\r\n local use a Python-compatible file can be generated by\r\n suffixing FILE with \".pyprof\".\r\n\r\nrequired arguments:\r\n \r\n serve start repository server process\r\n init initialize empty repository\r\n check verify repository\r\n key manage repository key\r\n change-passphrase change repository passphrase\r\n create create backup\r\n extract extract archive contents\r\n export-tar create tarball from archive\r\n diff find differences in archive contents\r\n rename rename archive\r\n delete delete archive\r\n list list archive or repository contents\r\n mount mount repository\r\n umount umount repository\r\n info show repository or archive information\r\n break-lock break repository and cache locks\r\n prune " + ], + [ + 2e-05, + " prune archives\r\n upgrade upgrade repository format\r\n recreate Re-create archives\r\n with-lock run user command with lock held\r\n debug debugging command (not intended for normal use)\r\n benchmark benchmark command\r\n" + ], + [ + 0.043747, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 8e-06, + "\u001b]1;~/Pictures\u0007" + ], + [ + 5.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 4.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000163, + "\u001b[?2004h" + ], + [ + 1.509225, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.593308, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.334291, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.170683, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.07295, + "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.184509, + "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.136032, + "\b\u001b[1ms\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.250718, + "\b\u001b[1me\u001b[1m are a lot of commands, so better we start with a few:\u001b[0m\u001b[39m" + ], + [ + 1.088446, + "\u001b[?1l\u001b>" + ], + [ + 0.000396, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000604, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000101, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 4.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000296, + "\u001b[?2004h" + ], + [ + 0.921744, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.276219, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.204903, + "\b\b\u001b[1m#\u001b[1m \u001b[1mL\u001b[0m\u001b[39m" + ], + [ + 0.137064, + "\b\u001b[1mL\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.16386, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.340061, + "\b\u001b[1mt\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.115905, + "\b\u001b[1m'\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.213255, + "\b\u001b[1ms\u001b[1m create a repo on an external drive:\u001b[0m\u001b[39m" + ], + [ + 1.086717, + "\u001b[?1l\u001b>" + ], + [ + 0.000391, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000606, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000133, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000274, + "\u001b[?2004h" + ], + [ + 1.935612, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.184978, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.115803, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.134282, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.266061, + " " + ], + [ + 0.599046, + "i" + ], + [ + 0.183493, + "n" + ], + [ + 0.181453, + "i" + ], + [ + 0.258375, + "t" + ], + [ + 0.712329, + " " + ], + [ + 0.381053, + "" + ], + [ + 0.381053, + "-" + ], + [ + 0.119206, + "-" + ], + [ + 0.18993, + "e" + ], + [ + 0.175168, + "n" + ], + [ + 0.258977, + "c" + ], + [ + 0.139364, + "r" + ], + [ + 0.111012, + "y" + ], + [ + 0.55406, + "p" + ], + [ + 0.261667, + "t" + ], + [ + 0.284611, + "i" + ], + [ + 0.142087, + "o" + ], + [ + 0.195185, + "n" + ], + [ + 
0.23882, + "=" + ], + [ + 0.31059, + "r" + ], + [ + 0.151355, + "e" + ], + [ + 0.165925, + "p" + ], + [ + 0.132833, + "o" + ], + [ + 0.253402, + "k" + ], + [ + 0.174711, + "e" + ], + [ + 0.245888, + "y" + ], + [ + 0.759586, + " " + ], + [ + 0.383355, + "\u001b[4m/\u001b[24m" + ], + [ + 0.189694, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.16364, + "\b\u001b[4mm\u001b[4me\u001b[24m" + ], + [ + 0.151451, + "\b\u001b[4me\u001b[4md\u001b[24m" + ], + [ + 0.239109, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.006487, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m init --encryption=repokey \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.268216, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003429, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m init --encryption=repokey \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.232352, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003575, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m init --encryption=repokey \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.492094, + "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" + ], + [ + 0.748712, + "\u001b[?1l\u001b>" + ], + [ + 0.001017, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000712, + "\u001b]2;borg init --encryption=repokey /media/backup/borgdemo\u0007\u001b]1;borg\u0007" + ], + [ + 0.548105, + "Enter new passphrase: " + ], + [ + 2.119749, + "\r\n" + ], + [ + 0.000155, + "Enter same passphrase again: " + ], + [ + 1.606761, + "\r\n" + ], + [ + 5.8e-05, + "Do you want your passphrase to be displayed for verification? 
[yN]: " + ], + [ + 0.901237, + "\r\n" + ], + [ + 0.362453, + "\r\nBy default repositories initialized with this version will produce security\r\nerrors if written to with an older version (up to and including Borg 1.0.8).\r\n\r\nIf you want to use these older versions, you can disable the check by running:\r\nborg upgrade --disable-tam '/media/backup/borgdemo'\r\n\r\nSee https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability for details about the security implications.\r\n" + ], + [ + 0.050488, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 5.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000166, + "\u001b[?2004h" + ], + [ + 2.49308, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.308744, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.256774, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.157732, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.127107, + "\b\u001b[1mh\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.178449, + "\b\u001b[1mi\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.179372, + "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.383584, + "\b\u001b[1m \u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.103361, + "\b\u001b[1mu\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.155066, + "\b\u001b[1ms\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.133308, + "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.23615, + "\b\u001b[1ms\u001b[1m the repokey encryption. 
You may look at \"borg help init\" or the \u001b[1mo\u001b[1mnline doc at https://borgbackup.readthedocs.io/ for other modes.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.159159, + "\u001b[?1l\u001b>" + ], + [ + 0.0004, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000738, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000111, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000286, + "\u001b[?2004h" + ], + [ + 1.645569, + "\u001b[?1l\u001b>" + ], + [ + 0.000452, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000619, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.0001, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000161, + "\u001b[?2004h" + ], + [ + 1.17234, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.575706, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.205759, + "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" + ], + [ + 0.343517, + "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.245497, + "\b\u001b[1mo\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.218486, + "\b\u001b[1m \u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.171258, + "\b\u001b[1mn\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.146364, + "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.25775, + "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.271708, + "\b\b\u001b[1mw\u001b[0m\u001b[39m\u001b[0m\u001b[39m \b" + ], + [ + 0.213838, + "\b\u001b[1mw\u001b[1m,\u001b[0m\u001b[39m" + ], + [ + 0.422324, + "\b\u001b[1m,\u001b[1m let's create our first (compressed) backup.\u001b[0m\u001b[39m" + ], + [ + 0.561514, + "\u001b[?1l\u001b>" + ], + [ + 0.000855, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000773, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 4.8e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000411, + "\u001b[?2004h" + ], + [ + 1.326196, + 
"\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.191851, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.136657, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.142499, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.173217, + " " + ], + [ + 0.294445, + "c" + ], + [ + 0.200519, + "r" + ], + [ + 0.153078, + "e" + ], + [ + 0.133383, + "a" + ], + [ + 0.12891, + "t" + ], + [ + 0.151491, + "e" + ], + [ + 0.728709, + " " + ], + [ + 0.592118, + "-" + ], + [ + 0.118108, + "-" + ], + [ + 0.277349, + "s" + ], + [ + 0.134588, + "t" + ], + [ + 0.148057, + "a" + ], + [ + 0.090202, + "t" + ], + [ + 0.150971, + "s" + ], + [ + 0.307217, + " " + ], + [ + 0.481688, + "-" + ], + [ + 0.112243, + "-" + ], + [ + 0.234317, + "p" + ], + [ + 0.12453, + "r" + ], + [ + 0.116446, + "o" + ], + [ + 0.213657, + "g" + ], + [ + 0.12239, + "r" + ], + [ + 0.165156, + "e" + ], + [ + 0.256082, + "s" + ], + [ + 0.175158, + "s" + ], + [ + 0.302493, + " " + ], + [ + 0.490303, + "-" + ], + [ + 0.117279, + "-" + ], + [ + 0.130499, + "c" + ], + [ + 0.146261, + "o" + ], + [ + 0.139848, + "m" + ], + [ + 0.156108, + "p" + ], + [ + 0.190058, + "r" + ], + [ + 0.166862, + "e" + ], + [ + 0.261225, + "s" + ], + [ + 0.157133, + "s" + ], + [ + 0.281205, + "i" + ], + [ + 0.142487, + "o" + ], + [ + 0.179023, + "n" + ], + [ + 0.854723, + " " + ], + [ + 0.580178, + "l" + ], + [ + 0.29757, + "z" + ], + [ + 0.3111, + "4" + ], + [ + 1.085772, + " " + ], + [ + 0.635539, + "\u001b[4m/\u001b[24m" + ], + [ + 0.268857, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.121341, + "\b\u001b[4mm\u001b[4me\u001b[24m" + ], + [ + 0.141645, + "\b\u001b[4me\u001b[4md\u001b[24m" + ], + [ + 0.230858, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.010346, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 
\u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.416084, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.004048, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.346657, + "\u001b[?7l" + ], + [ + 2.7e-05, + "\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003996, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m create --stats --progress --compression lz4 \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 1.068791, + "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" + ], + [ + 1.210608, + "\u001b[22D\u001b[24m/\u001b[24mm\u001b[24me\u001b[24md\u001b[24mi\u001b[24ma\u001b[24m/\u001b[24mb\u001b[24ma\u001b[24mc\u001b[24mk\u001b[24mu\u001b[24mp\u001b[24m/\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24md\u001b[24me\u001b[24mm\u001b[24mo:" + ], + [ + 0.125995, + ":" + ], + [ + 0.376036, + "b" + ], + [ + 0.101011, + "a" + ], + [ + 0.178171, + "c \r\u001b[K" + ], + [ + 0.133561, + "k" + ], + [ + 0.162923, + "\rku" + ], + [ + 0.241519, + "p" + ], + [ + 1.426974, + "1" + ], + [ + 0.432275, + " " + ], + [ + 0.295102, + "\u001b[4mW\u001b[24m" + ], + [ + 0.158768, + "\b\u001b[4mW\u001b[4ma\u001b[24m" + ], + [ + 0.270666, + "\b\u001b[4ma\u001b[4ml\u001b[24m" + ], + [ + 0.13015, + "\b\u001b[4ml\u001b[4ml\u001b[24m" + ], + [ + 0.267749, + "\b\u001b[4ml\u001b[4mp\u001b[24m" + ], + [ + 0.173461, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003997, + "\u001b[A\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ " + ], + [ + 3.5e-05, + "\u001b[32mborg\u001b[39m create --stats --progress --compression lz4 /media/backup/borgdemo::backup1 \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.997225, + "\b\b\u001b[4mr\u001b[24m\u001b[K" + ], + [ + 0.447022, + "\u001b[?1l\u001b>" + ], + [ + 0.002978, 
+ "\u001b[?2004l\r\r\n" + ], + [ + 0.000917, + "\u001b]2;borg create --stats --progress --compression lz4 Wallpaper\u0007\u001b]1;borg\u0007" + ], + [ + 0.630228, + "Enter passphrase for key /media/backup/borgdemo: " + ], + [ + 2.264647, + "\r\n" + ], + [ + 0.108689, + "0 B O 0 B C 0 B D 0 N Wallpaper \r" + ], + [ + 0.024194, + "Initializing cache transaction: Reading config \r" + ], + [ + 0.000234, + "Initializing cache transaction: Reading chunks \r" + ], + [ + 0.000225, + "Initializing cache transaction: Reading files \r" + ], + [ + 0.000215, + " \r" + ], + [ + 0.179719, + "5.21 MB O 5.21 MB C 5.21 MB D 8 N Wallpaper/bigcollec...em_Wasserloch_im_Q.jpg\r" + ], + [ + 0.206362, + "17.55 MB O 17.51 MB C 15.18 MB D 31 N Wallpaper/bigcoll...ckr_OpenGetty_Im.jpg\r" + ], + [ + 0.202173, + "28.63 MB O 28.59 MB C 26.26 MB D 49 N Wallpaper/bigcoll...ortugal____Stefa.jpg\r" + ], + [ + 0.201105, + "41.30 MB O 41.26 MB C 38.94 MB D 71 N Wallpaper/bigcoll...e_in_der_Gro_auf.jpg\r" + ], + [ + 0.205913, + "53.63 MB O 53.54 MB C 51.21 MB D 93 N Wallpaper/bigcoll...De_Janeiro__Bras.jpg\r" + ], + [ + 0.201657, + "66.10 MB O 65.99 MB C 63.66 MB D 115 N Wallpaper/bigcol...Kenai-Fjords-Nat.jpg\r" + ], + [ + 0.222663, + "78.06 MB O 77.92 MB C 75.59 MB D 135 N Wallpaper/bigcol...ien____Nora_De_A.jpg\r" + ], + [ + 0.206809, + "89.99 MB O 89.82 MB C 85.43 MB D 155 N Wallpaper/bigcol...__De_Pere__Wisco.jpg\r" + ], + [ + 0.204475, + "101.51 MB O 101.32 MB C 96.93 MB D 175 N Wallpaper/bigco..._Silver_Falls_S.jpg\r" + ], + [ + 0.206201, + "115.08 MB O 114.89 MB C 110.50 MB D 199 N Wallpaper/bigco..._Garret_Suhrie.jpg\r" + ], + [ + 0.202147, + "126.51 MB O 126.28 MB C 119.47 MB D 220 N Wallpaper/bigco...fenmesserfisch.jpg\r" + ], + [ + 0.206629, + "138.74 MB O 138.50 MB C 131.69 MB D 243 N Wallpaper/bigco...tswana____Mich.jpg\r" + ], + [ + 0.214855, + "152.84 MB O 152.60 MB C 142.74 MB D 269 N Wallpaper/bigco...fest__Munich__.jpg\r" + ], + [ + 0.200083, + "163.05 MB O 162.80 MB C 152.94 MB D 
288 N Wallpaper/bigco..._Marco_RomaniG.jpg\r" + ], + [ + 0.208535, + "175.85 MB O 175.57 MB C 164.47 MB D 308 N Wallpaper/bigco...gway__Colorado.jpg\r" + ], + [ + 0.21234, + "184.65 MB O 184.36 MB C 173.25 MB D 324 N Wallpaper/bigco...nstanz__Baden-.jpg\r" + ], + [ + 0.200087, + "194.92 MB O 194.59 MB C 183.49 MB D 343 N Wallpaper/bigco...op__Caledon__P.jpg\r" + ], + [ + 0.201257, + "204.71 MB O 204.38 MB C 191.68 MB D 361 N Wallpaper/bigco...izian_in_Jamni.jpg\r" + ], + [ + 0.213355, + "217.22 MB O 216.88 MB C 202.98 MB D 382 N Wallpaper/bigco...appadokien__T_.jpg\r" + ], + [ + 0.202274, + "230.56 MB O 230.16 MB C 212.45 MB D 404 N Wallpaper/bigco...eleiGetty_Imag.jpg\r" + ], + [ + 0.204836, + "242.95 MB O 242.53 MB C 224.34 MB D 426 N Wallpaper/bigco...g__Thailand___.jpg\r" + ], + [ + 0.205093, + "254.42 MB O 254.02 MB C 232.75 MB D 446 N Wallpaper/bigco...ame_Reserve__O.jpg\r" + ], + [ + 0.201488, + "265.77 MB O 265.39 MB C 242.76 MB D 466 N Wallpaper/bigco...e_Republik____.jpg\r" + ], + [ + 0.20036, + "278.64 MB O 278.26 MB C 254.62 MB D 488 N Wallpaper/bigco...ien____Patty_P.jpg\r" + ], + [ + 0.209301, + "288.82 MB O 288.45 MB C 264.81 MB D 505 N Wallpaper/bigco...Ruhpolding__Ch.jpg\r" + ], + [ + 0.214561, + "298.04 MB O 297.68 MB C 274.04 MB D 520 N Wallpaper/bigco...wo__Landkreis_.jpg\r" + ], + [ + 0.222111, + "311.03 MB O 310.66 MB C 287.02 MB D 543 N Wallpaper/bigco...a__Portugal___.jpg\r" + ], + [ + 0.204945, + "319.53 MB O 319.17 MB C 295.53 MB D 558 N Wallpaper/bigco...hinos__Hondura.jpg\r" + ], + [ + 0.213928, + "328.19 MB O 327.77 MB C 304.13 MB D 574 N Wallpaper/bigco...ndon__Gro_brit.jpg\r" + ], + [ + 0.206827, + "338.25 MB O 337.81 MB C 314.17 MB D 591 N Wallpaper/bigco...l_Forest__Bund.jpg\r" + ], + [ + 0.209094, + "347.40 MB O 346.96 MB C 323.32 MB D 606 N Wallpaper/bigco...tlantischen_Oz.jpg\r" + ], + [ + 0.200671, + "361.16 MB O 360.71 MB C 334.04 MB D 628 N Wallpaper/bigco...lpark__British.jpg\r" + ], + [ + 0.208778, + "375.20 MB O 374.77 MB C 
348.09 MB D 650 N Wallpaper/bigco...swagen_beim_Ro.jpg\r" + ], + [ + 0.2023, + "385.94 MB O 385.47 MB C 358.79 MB D 669 N Wallpaper/bigco...-Bessin__Frank.jpg\r" + ], + [ + 0.201448, + "396.55 MB O 396.10 MB C 368.89 MB D 687 N Wallpaper/bigco...nian_Switzerla.jpg\r" + ], + [ + 0.200229, + "411.96 MB O 411.41 MB C 373.94 MB D 711 N Wallpaper/bigco...CREATISTAGetty.jpg\r" + ], + [ + 0.202083, + "420.92 MB O 420.38 MB C 382.91 MB D 727 N Wallpaper/bigco...LLCCorbisVCG_G.jpg\r" + ], + [ + 0.202677, + "430.76 MB O 430.21 MB C 392.74 MB D 745 N Wallpaper/bigco...r__Tansania___.jpg\r" + ], + [ + 0.206733, + "441.45 MB O 440.87 MB C 400.76 MB D 763 N Wallpaper/bigco...andenburg__Deu.jpg\r" + ], + [ + 0.205541, + "449.42 MB O 448.83 MB C 408.72 MB D 776 N Wallpaper/bigco...Wind_Cave_Nati.jpg\r" + ], + [ + 0.201764, + "458.56 MB O 457.97 MB C 417.20 MB D 792 N Wallpaper/bigco...dney_Harbour_B.jpg\r" + ], + [ + 0.206272, + "470.73 MB O 470.08 MB C 428.74 MB D 815 N Wallpaper/bigco...hland____Patri.jpg\r" + ], + [ + 0.210875, + "485.80 MB O 485.15 MB C 443.01 MB D 843 N Wallpaper/bigco...Hokkaido__Japa.jpg\r" + ], + [ + 0.227162, + "498.93 MB O 498.27 MB C 450.80 MB D 867 N Wallpaper/bigco...topher_Collins.jpg\r" + ], + [ + 0.206293, + "510.73 MB O 510.07 MB C 462.15 MB D 887 N Wallpaper/bigco...itzeinschlag_i.jpg\r" + ], + [ + 0.200265, + "520.54 MB O 519.86 MB C 471.39 MB D 903 N Wallpaper/bigco..._zwischen_Boli.jpg\r" + ], + [ + 0.204067, + "528.01 MB O 527.33 MB C 478.86 MB D 916 N Wallpaper/bigco...jall__Island__.jpg\r" + ], + [ + 0.209223, + "539.61 MB O 538.94 MB C 490.47 MB D 934 N Wallpaper/bigco..._amares__Provi.jpg\r" + ], + [ + 0.215843, + "551.16 MB O 550.49 MB C 501.50 MB D 952 N Wallpaper/bigco...tionalpark__Ut.jpg\r" + ], + [ + 0.212909, + "561.29 MB O 560.60 MB C 511.22 MB D 970 N Wallpaper/bigco..._Inseln__Niede.jpg\r" + ], + [ + 0.209655, + "571.59 MB O 570.86 MB C 520.92 MB D 989 N Wallpaper/bigco...rbeskopf__Huns.jpg\r" + ], + [ + 0.232431, + "582.52 MB O 
581.80 MB C 525.99 MB D 1006 N Wallpaper/bigc...n__an_art_in 2.jpg\r" + ], + [ + 0.201199, + "593.36 MB O 592.12 MB C 536.31 MB D 1036 N Wallpaper/more/Green_Curves.jpg \r" + ], + [ + 0.205747, + "604.80 MB O 603.52 MB C 547.71 MB D 1044 N Wallpaper/evenmore/ChipDE 06.jpg \r" + ], + [ + 0.23016, + "Compacting segments 0% \r" + ], + [ + 0.174726, + "Compacting segments 50% \r" + ], + [ + 4.5e-05, + " \r" + ], + [ + 0.04695, + "Saving files cache \r" + ], + [ + 0.005688, + "Saving chunks cache \r" + ], + [ + 0.000299, + "Saving cache config \r" + ], + [ + 0.107527, + " \r" + ], + [ + 3.7e-05, + " \r" + ], + [ + 0.000355, + "------------------------------------------------------------------------------\r\n" + ], + [ + 3.7e-05, + "Archive name: backup1\r\n" + ], + [ + 1.4e-05, + "Archive fingerprint: 9758c7db339a066360bffad17b2ffac4fb368c6722c0be3a47a7a9b631f06407\r\n" + ], + [ + 0.000106, + "Time (start): Fri, 2017-07-14 21:54:06\r\nTime (end): Fri, 2017-07-14 21:54:17\r\n" + ], + [ + 3.9e-05, + "Duration: 11.40 seconds\r\n" + ], + [ + 3.4e-05, + "Number of files: 1050\r\n" + ], + [ + 7.2e-05, + "Utilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n" + ], + [ + 4.7e-05, + " Original size Compressed size Deduplicated size\r\n" + ], + [ + 1.1e-05, + "This archive: 618.96 MB 617.47 MB 561.67 MB\r\n" + ], + [ + 2.7e-05, + "All archives: 618.96 MB 617.47 MB 561.67 MB\r\n" + ], + [ + 2.4e-05, + "\r\n" + ], + [ + 2.3e-05, + " Unique chunks Total chunks\r\n" + ], + [ + 1.3e-05, + "Chunk index: 999 1093\r\n" + ], + [ + 2.4e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 0.04885, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000195, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000196, + "\u001b[?2004h" + ], + [ + 1.403148, + "\u001b[1m#\u001b[0m\u001b[39m" 
+ ], + [ + 0.918581, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.361872, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.12148, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.21559, + "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.152309, + "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.941741, + "\b\u001b[1mt\u001b[1m's nice, so far.\u001b[0m\u001b[39m" + ], + [ + 1.005262, + "\u001b[?1l\u001b>" + ], + [ + 0.00039, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001061, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 8.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000287, + "\u001b[?2004h" + ], + [ + 2.564637, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.34769, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.160447, + "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" + ], + [ + 0.153165, + "\b\u001b[1mS\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.17514, + "\b\u001b[1mo\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.198658, + "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.204631, + "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.250815, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 1.190059, + "\b\u001b[1mt\u001b[1m's add a new file…\u001b[0m\u001b[39m" + ], + [ + 1.216941, + "\u001b[?1l\u001b>" + ], + [ + 0.000401, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000756, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 2.1e-05, + "\u001b]1;~/Pictures\u0007" + ], + [ + 8.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 5.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000277, + "\u001b[?2004h" + ], + [ + 1.289557, + "\u001b[1m\u001b[31me\u001b[0m\u001b[39m" + ], + [ + 0.216875, + "\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mc\u001b[0m\u001b[39m" + ], + [ + 0.184187, + "\b\b\u001b[1m\u001b[31me\u001b[1m\u001b[31mc\u001b[1m\u001b[31mh\u001b[0m\u001b[39m" 
+ ], + [ + 0.177444, + "\b\b\b\u001b[0m\u001b[32me\u001b[0m\u001b[32mc\u001b[0m\u001b[32mh\u001b[32mo\u001b[39m" + ], + [ + 0.226152, + " " + ], + [ + 0.320216, + "\u001b[33m\"\u001b[39m" + ], + [ + 0.404454, + "\b\u001b[33m\"\u001b[33ma\u001b[39m" + ], + [ + 0.267657, + "\b\u001b[33ma\u001b[33md\u001b[39m" + ], + [ + 0.130258, + "\b\u001b[33md\u001b[33md\u001b[39m" + ], + [ + 1.613237, + "\b\u001b[33md\u001b[33me\u001b[39m" + ], + [ + 0.175381, + "\b\u001b[33me\u001b[33md\u001b[39m" + ], + [ + 0.404248, + "\b\u001b[33md\u001b[33m \u001b[39m" + ], + [ + 0.669276, + "\b\u001b[33m \u001b[33mn\u001b[39m" + ], + [ + 0.128663, + "\b\u001b[33mn\u001b[33me\u001b[39m" + ], + [ + 0.132483, + "\b\u001b[33me\u001b[33mw\u001b[39m" + ], + [ + 0.175823, + "\b\u001b[33mw\u001b[33m \u001b[39m" + ], + [ + 0.220023, + "\b\u001b[33m \u001b[33mn\u001b[39m" + ], + [ + 0.156931, + "\b\u001b[33mn\u001b[33mi\u001b[39m" + ], + [ + 0.10604, + "\b\u001b[33mi\u001b[33mc\u001b[39m" + ], + [ + 0.166585, + "\b\u001b[33mc\u001b[33me\u001b[39m" + ], + [ + 0.306911, + "\b\u001b[33me\u001b[33m \u001b[39m" + ], + [ + 0.228895, + "\b\u001b[33m \u001b[33mf\u001b[39m" + ], + [ + 0.160772, + "\b\u001b[33mf\u001b[33mi\u001b[39m" + ], + [ + 0.144448, + "\b\u001b[33mi\u001b[33ml\u001b[39m" + ], + [ + 0.125193, + "\b\u001b[33ml\u001b[33me\u001b[39m" + ], + [ + 0.828758, + "\b\u001b[33me\u001b[33m\"\u001b[39m" + ], + [ + 0.566156, + " " + ], + [ + 0.349791, + ">" + ], + [ + 0.577663, + " " + ], + [ + 0.28936, + "\u001b[4mW\u001b[24m" + ], + [ + 0.157708, + "\b\u001b[4mW\u001b[4ma\u001b[24m" + ], + [ + 0.226616, + "\b\u001b[4ma\u001b[4ml\u001b[24m" + ], + [ + 0.106124, + "\b\u001b[4ml\u001b[4ml\u001b[24m" + ], + [ + 0.099397, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.00361, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mecho\u001b[39m \u001b[33m\"added new nice file\"\u001b[39m > \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.822747, + 
"\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003743, + "\r\r\u001b[40C\u001b[0m\u001b[4m/\u001b[24m" + ], + [ + 0.00018, + "\r\r\n\u001b[J" + ], + [ + 5.1e-05, + "\u001b[38;5;33m2048example\u001b[0m/ \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mecho\u001b[39m \u001b[33m\"added new nice file\"\u001b[39m > \u001b[4mWallpaper/\u001b[24m\u001b[K" + ], + [ + 1.173525, + "\u001b[10D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/n" + ], + [ + 0.118482, + "e" + ], + [ + 0.130187, + "w" + ], + [ + 0.499912, + "f" + ], + [ + 0.161863, + "i" + ], + [ + 0.13679, + "l" + ], + [ + 0.093681, + "e" + ], + [ + 0.261183, + "." + ], + [ + 0.312651, + "t" + ], + [ + 0.10665, + "x" + ], + [ + 0.131562, + "t" + ], + [ + 0.79879, + "\u001b[?1l\u001b>" + ], + [ + 0.001397, + "\u001b[?2004l\r\r\n\u001b[J" + ], + [ + 0.000679, + "\u001b]2;echo \"added new nice file\" > Wallpaper/newfile.txt\u0007\u001b]1;echo\u0007" + ], + [ + 0.000151, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000183, + "\u001b[?2004h" + ], + [ + 2.785656, + "\u001b[32mecho\u001b[39m \u001b[33m\"added new nice file\"\u001b[39m > \u001b[4mWallpaper/newfile.txt\u001b[24m" + ], + [ + 0.206019, + "\u001b[50D\u001b[1m#\u001b[1m \u001b[1mS\u001b[1mo\u001b[1m \u001b[1ml\u001b[1me\u001b[1mt\u001b[1m'\u001b[1ms\u001b[1m \u001b[1ma\u001b[1md\u001b[1md\u001b[1m \u001b[1ma\u001b[1m \u001b[1mn\u001b[1me\u001b[1mw\u001b[1m \u001b[1mf\u001b[1mi\u001b[1ml\u001b[1me\u001b[1m…\u001b[0m\u001b[39m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m 
\u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24m \u001b[24D" + ], + [ + 0.251309, + "\u001b[24D\u001b[1mT\u001b[1mh\u001b[1ma\u001b[1mt\u001b[1m'\u001b[1ms\u001b[1m \u001b[1mn\u001b[1mi\u001b[1mc\u001b[1me\u001b[1m,\u001b[1m \u001b[1ms\u001b[1mo\u001b[1m \u001b[1mf\u001b[1ma\u001b[1mr\u001b[1m.\u001b[0m\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \b\b\b\b" + ], + [ + 0.372268, + "\u001b[22D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mc\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39me\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ms\u001b[0m\u001b[39mt\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39ms\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-progress --compression lz4 /media/backup/borgdemo::backup1 \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 0.686798, + "\b" + ], + [ + 0.49974, + "\b" + ], + [ + 0.029256, + "\b" + ], + [ + 0.030383, + "\b" + ], + [ + 0.030965, + "\b" + ], + [ + 0.02928, + "\b" + ], + [ + 0.030139, + "\b" + ], + [ + 0.029254, + "\b" + ], + [ + 0.03083, + "\b" + ], + [ + 0.030284, + "\b" + ], + [ + 0.030187, + "\b" + ], + [ + 0.030317, + "\b" + ], + [ + 0.439014, + "\u001b[1C" + ], + [ + 0.357869, + "\u001b[P\u001b[10C \u001b[11D" + ], + [ + 0.141225, + "2\u001b[24m \u001b[4mW\u001b[4ma\u001b[4ml\u001b[4ml\u001b[4mp\u001b[4ma\u001b[4mp\u001b[4me\u001b[4mr\u001b[24m\u001b[10D" + ], + [ + 0.615794, + "\u001b[?1l\u001b>" + ], + [ + 0.001653, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000779, + "\u001b]2;borg create --stats --progress --compression lz4 Wallpaper\u0007\u001b]1;borg\u0007" + ], + [ + 0.627474, + "Enter passphrase for key /media/backup/borgdemo: " + ], + [ + 3.666123, + "\r\n" + ], + [ + 0.128711, + "0 B O 0 B C 0 B D 0 N Wallpaper \r" + ], + [ + 0.006399, + 
"Initializing cache transaction: Reading config \r" + ], + [ + 0.000208, + "Initializing cache transaction: Reading chunks \r" + ], + [ + 0.000253, + "Initializing cache transaction: Reading files \r" + ], + [ + 0.000269, + " \r" + ], + [ + 0.247567, + "584.80 MB O 584.09 MB C 65 B D 1011 N Wallpaper/newfile.txt \r" + ], + [ + 0.264517, + "Compacting segments 0% \r" + ], + [ + 0.000942, + "Compacting segments 50% \r" + ], + [ + 4e-05, + " \r" + ], + [ + 0.0606, + "Saving files cache \r" + ], + [ + 0.005405, + "Saving chunks cache \r" + ], + [ + 0.000411, + "Saving cache config \r" + ], + [ + 0.079766, + " \r" + ], + [ + 4.7e-05, + " \r" + ], + [ + 0.000375, + "------------------------------------------------------------------------------\r\n" + ], + [ + 2.4e-05, + "Archive name: backup2\r\n" + ], + [ + 2.7e-05, + "Archive fingerprint: 5aaf03d1c710cf774f9c9ff1c6317b621c14e519c6bac459f6d64b31e3bbd200\r\n" + ], + [ + 0.000102, + "Time (start): Fri, 2017-07-14 21:54:56\r\n" + ], + [ + 2.1e-05, + "Time (end): Fri, 2017-07-14 21:54:56\r\nDuration: 0.33 seconds\r\n" + ], + [ + 7.4e-05, + "Number of files: 1051\r\n" + ], + [ + 8.3e-05, + "Utilization of maximum supported archive size: 0%\r\n------------------------------------------------------------------------------\r\n" + ], + [ + 7e-06, + " Original size Compressed size Deduplicated size\r\n" + ], + [ + 2.8e-05, + "This archive: 618.96 MB 617.47 MB 106.70 kB\r\n" + ], + [ + 2.2e-05, + "All archives: 1.24 GB 1.23 GB 561.77 MB\r\n" + ], + [ + 5.3e-05, + "\r\n" + ], + [ + 7e-06, + " Unique chunks Total chunks\r\n" + ], + [ + 2.2e-05, + "Chunk index: 1002 2187\r\n" + ], + [ + 2.3e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 0.046167, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 8.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000212, + "\u001b[?2004h" + ], + [ + 1.922718, + 
"\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.225243, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.166756, + "\b\b\u001b[1m#\u001b[1m \u001b[1mW\u001b[0m\u001b[39m" + ], + [ + 0.162323, + "\b\u001b[1mW\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.097757, + "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.265877, + "\b\u001b[1mw\u001b[1m, this was a lot faster!\u001b[0m\u001b[39m" + ], + [ + 0.789811, + "\u001b[?1l\u001b>" + ], + [ + 0.000392, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000754, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 7.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000273, + "\u001b[?2004h" + ], + [ + 1.15181, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.234049, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.209548, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.168421, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.232312, + "\b\u001b[1mo\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.201133, + "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.338758, + "\b\u001b[1mi\u001b[1mce the \"Deduplicated size\" in \"This archive\"?\u001b[0m\u001b[39m" + ], + [ + 2.236964, + "\u001b[?1l\u001b>" + ], + [ + 0.000951, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001084, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 9.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 9.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000505, + "\u001b[?2004h" + ], + [ + 2.51909, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.240091, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.216793, + "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" + ], + [ + 0.192027, + "\b\u001b[1mB\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.138706, + "\b\u001b[1mo\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.129501, + "\b\u001b[1mr\u001b[1mg\u001b[0m\u001b[39m" + ], + [ + 0.536844, + 
"\b\u001b[1mg\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.143314, + "\b\u001b[1m \u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.138384, + "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.197658, + "\b\u001b[1me\u001b[1mcognized that most files did not change and deduplicated them.\u001b[0m\u001b[39m" + ], + [ + 1.432604, + "\u001b[?1l\u001b>" + ], + [ + 0.000397, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00069, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 6.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000319, + "\u001b[?2004h" + ], + [ + 1.153873, + "\u001b[?1l\u001b>" + ], + [ + 0.000537, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000623, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000101, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000108, + "\u001b[?1h\u001b=" + ], + [ + 0.000309, + "\u001b[?2004h" + ], + [ + 0.447325, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.257975, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.210602, + "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" + ], + [ + 0.182148, + "\b\u001b[1mB\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.159923, + "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.165905, + "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.175925, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.116184, + "\b\u001b[1mw\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.125029, + "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.110311, + "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.26718, + "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.393846, + "\b\u001b[1m \u001b[1mhappens, when we move a dir and create a new backup?\u001b[0m\u001b[39m" + ], + [ + 1.840157, + "\u001b[?1l\u001b>" + ], + [ + 0.000398, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000678, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000105, + 
"\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000102, + "\u001b[?1h\u001b=" + ], + [ + 0.000242, + "\u001b[?2004h" + ], + [ + 1.044202, + "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m" + ], + [ + 0.167573, + "\b\u001b[0m\u001b[32mm\u001b[32mv\u001b[39m" + ], + [ + 0.203794, + " " + ], + [ + 0.199502, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.002962, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.399299, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.004451, + "\r\r\u001b[14C\u001b[0m\u001b[4m/\u001b[24m" + ], + [ + 0.000168, + "\r\r\n\u001b[J" + ], + [ + 3.2e-05, + "\u001b[38;5;33m2048example\u001b[0m/ \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/\u001b[24m\u001b[K" + ], + [ + 0.416097, + "\u001b[?7l" + ], + [ + 1.3e-05, + "\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.002339, + "\u001b[10D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/2048example\u001b[1m/\u001b[0m" + ], + [ + 0.000184, + "\r\r\n" + ], + [ + 0.000156, + "\u001b[7m2048example/ \u001b[0m \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \u001b[K\r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.23342, + "\r\r\n" + ], + [ + 1.4e-05, + "\u001b[7m2048example/ \u001b[0m \r\u001b[7m2048example/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m 
Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K\u001b[12Dbigcollecti\u001b[0mon\u001b[1m/\u001b[0m" + ], + [ + 0.000154, + "\r\r\n" + ], + [ + 2.5e-05, + "\u001b[38;5;33m2048example\u001b[0m/ \r\u001b[1B\u001b[7mbigcollection/\u001b[0m \r\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m Wallpaper/bigcollection\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.378809, + "\r\r\n\u001b[J\u001b[A\u001b[29C" + ], + [ + 0.002159, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m\u001b[K\u001b[1C" + ], + [ + 0.35586, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.007824, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.248908, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.002608, + "\r\r\u001b[38C\u001b[0m\u001b[4m/\u001b[24m" + ], + [ + 0.000171, + "\r\r\n\u001b[J" + ], + [ + 5.4e-05, + "\u001b[38;5;33m2048example\u001b[0m/ \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m \u001b[4mWallpaper/\u001b[24m\u001b[K" + ], + [ + 0.248788, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.004567, + "\u001b[10D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/2048example\u001b[1m/\u001b[0m" + ], + [ + 0.000182, + "\r\r\n" + ], + [ + 9.1e-05, + "\u001b[7m2048example/ \u001b[0m \u001b[38;5;13mdeer.jpg\u001b[0m \u001b[38;5;33mmore\u001b[0m/ \u001b[K\r\n\u001b[J\u001b[38;5;33mbigcollection\u001b[0m/ \u001b[J\u001b[38;5;33mevenmore\u001b[0m/ \u001b[Jnewfile.txt \u001b[J 
\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.24704, + "\r\r\n" + ], + [ + 3.2e-05, + "\u001b[7m2048example/ \u001b[0m \r\u001b[7m2048example/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m Wallpaper/2048example\u001b[1m/\u001b[0m\u001b[K\u001b[12Dbigcollecti\u001b[0mon\u001b[1m/\u001b[0m" + ], + [ + 0.000389, + "\r\r\n" + ], + [ + 3e-05, + "\u001b[38;5;33m2048example\u001b[0m/ \r\u001b[1B\u001b[7mbigcollection/\u001b[0m \r\u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m Wallpaper/bigcollection\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.595335, + "\r\r\n\u001b[J\u001b[A\u001b[53C" + ], + [ + 0.003755, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper/bigcollection\u001b[24m \u001b[4mWallpaper/bigcollection\u001b[24m\u001b[K\u001b[1C" + ], + [ + 0.271014, + "\b" + ], + [ + 0.554135, + "\u001b[23D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[24m/\u001b[24mb\u001b[24mi\u001b[24mg\u001b[24mc\u001b[24mo\u001b[24ml\u001b[24ml\u001b[24me\u001b[24mc\u001b[24mt\u001b[24mi\u001b[24mo\u001b[24mn_" + ], + [ + 0.317529, + "N" + ], + [ + 0.104435, + "E" + ], + [ + 0.175308, + "W" + ], + [ + 0.956051, + "\u001b[?1l\u001b>" + ], + [ + 0.001192, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000754, + "\u001b]2;mv -i Wallpaper/bigcollection Wallpaper/bigcollection_NEW\u0007\u001b]1;mv\u0007" + ], + [ + 0.001182, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 9.8e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.7e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000188, + "\u001b[?2004h" + ], + [ + 1.857261, + "\u001b[32mmv\u001b[39m Wallpaper/bigcollection 
\u001b[4mWallpaper/bigcollection_NEW\u001b[24m" + ], + [ + 0.208181, + "\u001b[54D\u001b[1m#\u001b[1m \u001b[1mB\u001b[1mu\u001b[1mt\u001b[1m \u001b[1mw\u001b[1mh\u001b[1ma\u001b[1mt\u001b[1m \u001b[1mh\u001b[1ma\u001b[1mp\u001b[1mp\u001b[1me\u001b[1mn\u001b[1ms\u001b[1m,\u001b[1m \u001b[1mw\u001b[1mh\u001b[1me\u001b[1mn\u001b[1m \u001b[1mw\u001b[1me\u001b[24m\u001b[1m \u001b[24m\u001b[1mm\u001b[24m\u001b[1mo\u001b[24m\u001b[1mv\u001b[24m\u001b[1me\u001b[24m\u001b[1m \u001b[24m\u001b[1ma\u001b[24m\u001b[1m \u001b[24m\u001b[1md\u001b[24m\u001b[1mi\u001b[24m\u001b[1mr\u001b[24m\u001b[1m \u001b[24m\u001b[1ma\u001b[24m\u001b[1mn\u001b[24m\u001b[1md\u001b[24m\u001b[1m \u001b[24m\u001b[1mc\u001b[24m\u001b[1mr\u001b[24m\u001b[1me\u001b[24m\u001b[1ma\u001b[24m\u001b[1mt\u001b[24m\u001b[1me\u001b[24m\u001b[1m \u001b[24m\u001b[1ma\u001b[24m\u001b[1m \u001b[24m\u001b[1mn\u001b[24m\u001b[1me\u001b[1mw backup?\u001b[0m\u001b[39m" + ], + [ + 0.2399, + "\u001b[60D\u001b[1mo\u001b[1mr\u001b[1mg\u001b[1m \u001b[1mr\u001b[1me\u001b[1mc\u001b[1mo\u001b[1mg\u001b[1mn\u001b[1mi\u001b[1mz\u001b[1me\u001b[1md\u001b[1m \u001b[1mt\u001b[1mh\u001b[1ma\u001b[1mt\u001b[1m \u001b[1mm\u001b[1mo\u001b[1ms\u001b[1mt\u001b[1m \u001b[1mf\u001b[1mi\u001b[1ml\u001b[1me\u001b[1ms\u001b[1m \u001b[1md\u001b[1mi\u001b[1md\u001b[1m \u001b[1mn\u001b[1mo\u001b[1mt\u001b[1m \u001b[1mc\u001b[1mh\u001b[1ma\u001b[1mn\u001b[1mg\u001b[1me\u001b[1m \u001b[1ma\u001b[1mn\u001b[1md\u001b[1m \u001b[1md\u001b[1me\u001b[1md\u001b[1mu\u001b[1mp\u001b[1ml\u001b[1mi\u001b[1mc\u001b[1ma\u001b[1mt\u001b[1med them.\u001b[0m\u001b[39m" + ], + [ + 0.227963, + "\u001b[69D\u001b[1mN\u001b[1mo\u001b[1mt\u001b[1mi\u001b[1mc\u001b[1me\u001b[1m \u001b[1mt\u001b[1mh\u001b[1me\u001b[1m \u001b[1m\"\u001b[1mD\u001b[2C\u001b[0m\u001b[39m\u001b[39P\u001b[10C\u001b[1ms\u001b[1mi\u001b[1mz\u001b[1me\u001b[1m\"\u001b[1m in \"This archive\"?\u001b[0m\u001b[39m \u001b[20D" + ], + [ + 0.344233, + 
"\u001b[49D\u001b[1mW\u001b[1mo\u001b[1mw\u001b[1m,\u001b[1m \u001b[1mt\u001b[1mh\u001b[1mi\u001b[1ms\u001b[1m \u001b[1mw\u001b[1ma\u001b[1ms\u001b[1m \u001b[1ma\u001b[1m \u001b[1ml\u001b[1mo\u001b[1mt\u001b[1m \u001b[1mf\u001b[1ma\u001b[1ms\u001b[1mt\u001b[1me\u001b[1mr\u001b[1m!\u001b[0m\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[0m\u001b[39m \u001b[22D" + ], + [ + 0.396096, + "\u001b[29D\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[0m\u001b[32mg\u001b[39m\u001b[0m\u001b[39m \u001b[0m\u001b[39mc\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39me\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ms\u001b[0m\u001b[39mt\u001b[0m\u001b[39ma\u001b[0m\u001b[39mt\u001b[0m\u001b[39ms\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39mp\u001b[0m\u001b[39mr\u001b[0m\u001b[39mo\u001b[0m\u001b[39mg\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39mss --compression lz4 /media/backup/borgdemo::backup2 \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 0.854343, + "\b" + ], + [ + 0.192067, + "\b" + ], + [ + 0.161921, + "\b" + ], + [ + 0.152949, + "\b" + ], + [ + 0.158914, + "\b" + ], + [ + 0.150013, + "\b" + ], + [ + 0.168061, + "\b" + ], + [ + 0.170964, + "\b" + ], + [ + 0.156237, + "\b" + ], + [ + 0.161813, + "\b" + ], + [ + 0.698972, + "\b\u001b[P\u001b[10C \u001b[11D" + ], + [ + 0.185005, + "3\u001b[24m \u001b[4mW\u001b[4ma\u001b[4ml\u001b[4ml\u001b[4mp\u001b[4ma\u001b[4mp\u001b[4me\u001b[4mr\u001b[24m\u001b[10D" + ], + [ + 0.670037, + "\u001b[?1l\u001b>" + ], 
+ [ + 0.002029, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000793, + "\u001b]2;borg create --stats --progress --compression lz4 Wallpaper\u0007\u001b]1;borg\u0007" + ], + [ + 0.621587, + "Enter passphrase for key /media/backup/borgdemo: " + ], + [ + 5.034162, + "\r\n" + ], + [ + 0.136527, + "0 B O 0 B C 0 B D 0 N Wallpaper \r" + ], + [ + 0.028491, + "Initializing cache transaction: Reading config \r" + ], + [ + 0.000245, + "Initializing cache transaction: Reading chunks \r" + ], + [ + 0.000278, + "Initializing cache transaction: Reading files \r" + ], + [ + 0.000296, + " \r" + ], + [ + 0.173817, + "10.07 MB O 10.04 MB C 0 B D 17 N Wallpaper/bigcollec...rland__England____A.jpg\r" + ], + [ + 0.20311, + "29.10 MB O 29.05 MB C 0 B D 50 N Wallpaper/bigcollec...Creek_Redwoods_Stat.jpg\r" + ], + [ + 0.202422, + "47.67 MB O 47.62 MB C 0 B D 83 N Wallpaper/bigcollec...rson-Wildschutzgebi.jpg\r" + ], + [ + 0.216811, + "64.30 MB O 64.19 MB C 0 B D 112 N Wallpaper/bigcollec..._Planten_un_Blomen.jpg\r" + ], + [ + 0.214409, + "80.89 MB O 80.75 MB C 0 B D 140 N Wallpaper/bigcollec...g__Cologne__German.jpg\r" + ], + [ + 0.202244, + "100.45 MB O 100.26 MB C 0 B D 173 N Wallpaper/bigcolle..._Menorca__Spanien.jpg\r" + ], + [ + 0.202027, + "116.80 MB O 116.61 MB C 0 B D 202 N Wallpaper/bigcolle...artenkirchen__Bay.jpg\r" + ], + [ + 0.202003, + "130.38 MB O 130.15 MB C 0 B D 227 N Wallpaper/bigcolle..._zur_Felsenkirche.jpg\r" + ], + [ + 0.234918, + "143.32 MB O 143.09 MB C 0 B D 251 N Wallpaper/bigcolle...land__Antarktis__.jpg\r" + ], + [ + 0.204976, + "156.31 MB O 156.07 MB C 0 B D 275 N Wallpaper/bigcolle...-Stadion__Rio_de_.jpg\r" + ], + [ + 0.205408, + "173.36 MB O 173.09 MB C 0 B D 304 N Wallpaper/bigcolle...lpark__Alaska__US.jpg\r" + ], + [ + 0.221776, + "183.65 MB O 183.35 MB C 0 B D 322 N Wallpaper/bigcolle...lmeer____Pasquale.jpg\r" + ], + [ + 0.201052, + "195.95 MB O 195.63 MB C 0 B D 345 N Wallpaper/bigcolle...Schutzgebiet_Mary.jpg\r" + ], + [ + 0.240687, + "217.22 MB O 216.88 MB 
C 0 B D 382 N Wallpaper/bigcolle...__Kappadokien__T_.jpg\r" + ], + [ + 0.20767, + "233.09 MB O 232.68 MB C 0 B D 409 N Wallpaper/bigcolle...epublic_ImagesShu.jpg\r" + ], + [ + 0.210433, + "250.21 MB O 249.81 MB C 0 B D 439 N Wallpaper/bigcolle...ter__Pr_fektur_Fu.jpg\r" + ], + [ + 0.200954, + "268.90 MB O 268.51 MB C 0 B D 472 N Wallpaper/bigcolle...uth_Carolina__USA.jpg\r" + ], + [ + 0.212828, + "286.72 MB O 286.35 MB C 0 B D 502 N Wallpaper/bigcolle...l_Park__Cobham__E.jpg\r" + ], + [ + 0.206527, + "296.84 MB O 296.47 MB C 0 B D 518 N Wallpaper/bigcolle...entAlamy______Bin.jpg\r" + ], + [ + 0.205003, + "310.38 MB O 310.00 MB C 0 B D 542 N Wallpaper/bigcolle...ationalpark__Flor.jpg\r" + ], + [ + 0.209538, + "320.38 MB O 320.03 MB C 0 B D 559 N Wallpaper/bigcolle...ma__Bahamas____Ji.jpg\r" + ], + [ + 0.201896, + "331.76 MB O 331.35 MB C 0 B D 580 N Wallpaper/bigcolle...rd_Bay__Eyre-Halb.jpg\r" + ], + [ + 0.207585, + "347.40 MB O 346.96 MB C 0 B D 606 N Wallpaper/bigcolle...s_Atlantischen_Oz.jpg\r" + ], + [ + 0.200781, + "369.05 MB O 368.62 MB C 0 B D 640 N Wallpaper/bigcolle...ankreich____John_.jpg\r" + ], + [ + 0.202326, + "379.22 MB O 378.78 MB C 0 B D 657 N Wallpaper/bigcolle...chtanemone__Insel.jpg\r" + ], + [ + 0.211929, + "389.83 MB O 389.36 MB C 0 B D 676 N Wallpaper/bigcolle...ugal____Mikael_Sv.jpg\r" + ], + [ + 0.219553, + "402.12 MB O 401.68 MB C 0 B D 695 N Wallpaper/bigcolle...rk_Sarek__Schwede.jpg\r" + ], + [ + 0.20375, + "416.03 MB O 415.48 MB C 0 B D 718 N Wallpaper/bigcolle...em_taubenetzten_G.jpg\r" + ], + [ + 0.201474, + "428.93 MB O 428.38 MB C 0 B D 742 N Wallpaper/bigcolle...Francisco_Bay__Ka.jpg\r" + ], + [ + 0.200248, + "437.92 MB O 437.35 MB C 0 B D 756 N Wallpaper/bigcolle..._der_N_he_von_Tro.jpg\r" + ], + [ + 0.215254, + "446.04 MB O 445.46 MB C 0 B D 770 N Wallpaper/bigcolle...enver__Colorado__.jpg\r" + ], + [ + 0.202133, + "455.95 MB O 455.36 MB C 0 B D 787 N Wallpaper/bigcolle..._Son_Doong-H_hle_.jpg\r" + ], + [ + 0.208499, + "471.36 MB 
O 470.71 MB C 0 B D 816 N Wallpaper/bigcolle...ly_National_Monum.jpg\r" + ], + [ + 0.205116, + "491.46 MB O 490.81 MB C 0 B D 853 N Wallpaper/bigcolle...ted_during_the_ 1.jpg\r" + ], + [ + 0.220215, + "510.73 MB O 510.07 MB C 0 B D 887 N Wallpaper/bigcolle..._Blitzeinschlag_i.jpg\r" + ], + [ + 0.201825, + "522.32 MB O 521.65 MB C 0 B D 906 N Wallpaper/bigcolle...vador__Santiago__.jpg\r" + ], + [ + 0.202937, + "534.02 MB O 533.34 MB C 0 B D 925 N Wallpaper/bigcolle...doah_National_Par.jpg\r" + ], + [ + 0.202635, + "550.50 MB O 549.83 MB C 0 B D 951 N Wallpaper/bigcolle...liffs_National_Mo.jpg\r" + ], + [ + 0.202296, + "564.18 MB O 563.47 MB C 0 B D 976 N Wallpaper/bigcolle...n_in_Aktion____Va.jpg\r" + ], + [ + 0.203791, + "576.43 MB O 575.71 MB C 0 B D 996 N Wallpaper/bigcolle...______WRIGHTSuper.jpg\r" + ], + [ + 0.439796, + "Compacting segments 0% \r" + ], + [ + 0.000919, + "Compacting segments 50% \r" + ], + [ + 3.7e-05, + " \r" + ], + [ + 0.040817, + "Saving files cache \r" + ], + [ + 0.010023, + "Saving chunks cache \r" + ], + [ + 0.000278, + "Saving cache config \r" + ], + [ + 0.093829, + " \r" + ], + [ + 1.6e-05, + " \r" + ], + [ + 0.000308, + "------------------------------------------------------------------------------\r\n" + ], + [ + 9e-06, + "Archive name: backup3\r\n" + ], + [ + 3.8e-05, + "Archive fingerprint: 36cd8fdf9b8b2e3bbb3fc2bb600acd48609efaf3a0880f900e0701a47ff69d4d\r\n" + ], + [ + 2e-05, + "Time (start): Fri, 2017-07-14 21:55:37\r\n" + ], + [ + 2.4e-05, + "Time (end): Fri, 2017-07-14 21:55:46\r\n" + ], + [ + 2.2e-05, + "Duration: 8.58 seconds\r\n" + ], + [ + 2.6e-05, + "Number of files: 1051\r\n" + ], + [ + 2.6e-05, + "Utilization of maximum supported archive size: 0%\r\n" + ], + [ + 2.1e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 2.6e-05, + " Original size Compressed size Deduplicated size\r\n" + ], + [ + 2.4e-05, + "This archive: 618.96 MB 617.47 MB 107.55 kB\r\n" + ], + [ + 2.1e-05, 
+ "All archives: 1.86 GB 1.85 GB 561.88 MB\r\n" + ], + [ + 2.5e-05, + "\r\n" + ], + [ + 3.9e-05, + " Unique chunks Total chunks\r\n" + ], + [ + 1.1e-05, + "Chunk index: 1006 3283\r\n" + ], + [ + 4.8e-05, + "------------------------------------------------------------------------------\r\n" + ], + [ + 0.048607, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 1.9e-05, + "\u001b]1;~/Pictures\u0007" + ], + [ + 7.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00017, + "\u001b[?2004h" + ], + [ + 1.509372, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.261334, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.25826, + "\b\b\u001b[1m#\u001b[1m \u001b[1mS\u001b[0m\u001b[39m" + ], + [ + 0.162616, + "\b\u001b[1mS\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.27891, + "\b\u001b[1mt\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.174723, + "\b\u001b[1mi\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.124142, + "\b\u001b[1ml\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 1.012371, + "\b\u001b[1ml\u001b[1m quite fast…\u001b[0m\u001b[39m" + ], + [ + 0.74493, + "\u001b[?1l\u001b>" + ], + [ + 0.000416, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000686, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 6e-06, + "\u001b]1;~/Pictures\u0007" + ], + [ + 8.8e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000111, + "\u001b[?1h\u001b=" + ], + [ + 0.000271, + "\u001b[?2004h" + ], + [ + 2.038818, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.861519, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 2.235116, + "\b\b\u001b[1m#\u001b[1m \u001b[1mB\u001b[0m\u001b[39m" + ], + [ + 0.20981, + "\b\u001b[1mB\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.216676, + "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.72822, + "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 1.094756, + "\b\u001b[1m \u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.315528, + 
"\b\u001b[1mw\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.23713, + "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.286805, + "\b\u001b[1me\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.638764, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.81778, + "\b\u001b[1m \u001b[1my\u001b[0m\u001b[39m" + ], + [ + 0.245269, + "\b\u001b[1my\u001b[1mou look at the \"deduplicated file size\" again, you see that borg\u001b[1m \u001b[1malso recognized that only the dir and not the files changed in this backup.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.34618, + "\u001b[?1l\u001b>" + ], + [ + 0.000453, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000631, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.00011, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000113, + "\u001b[?1h\u001b=" + ], + [ + 0.000262, + "\u001b[?2004h" + ], + [ + 3.418707, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.275819, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.2004, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.172829, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.308378, + "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.703684, + "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.8183, + "\b\u001b[1m \u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.193322, + "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.18438, + "\b\u001b[1me\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.389996, + "\b\u001b[1mt\u001b[1m's look into a repo.\u001b[0m\u001b[39m" + ], + [ + 0.857879, + "\u001b[?1l\u001b>" + ], + [ + 0.000349, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000564, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 2.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000246, + "\u001b[?2004h" + ], + [ + 1.60039, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.177554, + 
"\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.117613, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.12982, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.145309, + " " + ], + [ + 0.256078, + "l" + ], + [ + 0.145029, + "i" + ], + [ + 0.100415, + "s" + ], + [ + 0.137667, + "t" + ], + [ + 0.172051, + " " + ], + [ + 0.490083, + "\u001b[4m/\u001b[24m" + ], + [ + 0.190449, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.216676, + "\b\u001b[4mm\u001b[4me\u001b[24m" + ], + [ + 0.174909, + "\b\u001b[4me\u001b[4md\u001b[24m" + ], + [ + 0.242368, + "\u001b[?7l\u001b[31m......\u001b[39m" + ], + [ + 3.2e-05, + "\u001b[?7h" + ], + [ + 0.00599, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.345758, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003294, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.253376, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003389, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 1.036958, + "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" + ], + [ + 2.6e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.000854, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000717, + "\u001b]2;borg list /media/backup/borgdemo\u0007\u001b]1;borg\u0007" + ], + [ + 0.624291, + "Enter passphrase for key /media/backup/borgdemo: " + ], + [ + 2.363577, + "\r\n" + ], + [ + 0.158203, + "backup1 Fri, 2017-07-14 21:54:06 [9758c7db339a066360bffad17b2ffac4fb368c6722c0be3a47a7a9b631f06407]\r\nbackup2 Fri, 2017-07-14 21:54:56 
[5aaf03d1c710cf774f9c9ff1c6317b621c14e519c6bac459f6d64b31e3bbd200]\r\nbackup3 Fri, 2017-07-14 21:55:37 [36cd8fdf9b8b2e3bbb3fc2bb600acd48609efaf3a0880f900e0701a47ff69d4d]\r\n" + ], + [ + 0.044143, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.4e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000207, + "\u001b[?2004h" + ], + [ + 5.582312, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.371134, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.184918, + "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" + ], + [ + 0.177123, + "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.148041, + "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.461676, + "\b\u001b[1mu\u001b[1m'\u001b[0m\u001b[39m" + ], + [ + 0.668888, + "\b\u001b[1m'\u001b[1mll see a list of all backups.\u001b[0m\u001b[39m" + ], + [ + 0.876235, + "\u001b[?1l\u001b>" + ], + [ + 0.000363, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001075, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 8.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000297, + "\u001b[?2004h" + ], + [ + 2.475491, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.382591, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.23474, + "\b\b\u001b[1m#\u001b[1m \u001b[1mY\u001b[0m\u001b[39m" + ], + [ + 0.210269, + "\b\u001b[1mY\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.196151, + "\b\u001b[1mo\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.460253, + "\b\u001b[1mu\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.305764, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.184098, + "\b\u001b[1mc\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.212534, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.305097, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.163485, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.194803, + 
"\b\u001b[1ma\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.282791, + "\b\u001b[1ml\u001b[1mso use the same command to look into an archive. But we better f\u001b[1mi\u001b[1mlter the output here:\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.679252, + "\u001b[?1l\u001b>" + ], + [ + 0.000434, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000646, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000107, + "\u001b[?1h\u001b=" + ], + [ + 0.000302, + "\u001b[?2004h" + ], + [ + 1.162094, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.184756, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.114887, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.143983, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.230507, + " " + ], + [ + 0.414382, + "l" + ], + [ + 0.153591, + "i" + ], + [ + 0.044178, + "s" + ], + [ + 0.236299, + "t" + ], + [ + 0.330148, + " " + ], + [ + 0.70018, + "\u001b[4m/\u001b[24m" + ], + [ + 0.193582, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.172118, + "\b\u001b[4mm\u001b[4me\u001b[24m" + ], + [ + 0.134283, + "\b\u001b[4me\u001b[4md\u001b[24m" + ], + [ + 0.250757, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.006227, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.374078, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003992, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.2609, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003434, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list 
\u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.237963, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003371, + "\r\r\u001b[34C\u001b[0m\u001b[4m/\u001b[24m" + ], + [ + 0.000178, + "\r\r\n\u001b[J" + ], + [ + 4.2e-05, + "\u001b[0mREADME \u001b[38;5;33mdata\u001b[0m/ index.14 nonce \r\n\u001b[Jconfig \u001b[Jhints.14 \u001b[Jintegrity.14 \u001b[J \u001b[A\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mborg\u001b[39m list \u001b[4m/media/backup/borgdemo/\u001b[24m\u001b[K" + ], + [ + 0.833604, + "\b\b\u001b[4mo\u001b[24m\u001b[24m \b" + ], + [ + 1.042199, + "\u001b[22D\u001b[24m/\u001b[24mm\u001b[24me\u001b[24md\u001b[24mi\u001b[24ma\u001b[24m/\u001b[24mb\u001b[24ma\u001b[24mc\u001b[24mk\u001b[24mu\u001b[24mp\u001b[24m/\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24md\u001b[24me\u001b[24mm\u001b[24mo:" + ], + [ + 0.139477, + ":" + ], + [ + 0.711096, + "b" + ], + [ + 0.099664, + "a" + ], + [ + 0.149912, + "c" + ], + [ + 0.16888, + "k" + ], + [ + 0.923931, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.005451, + "\r\r\r\r\n\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m list /media/backup/borgdemo::back\u001b[K" + ], + [ + 0.885297, + "u" + ], + [ + 0.29853, + "p" + ], + [ + 0.456244, + "3" + ], + [ + 1.061844, + " " + ], + [ + 0.589511, + "|" + ], + [ + 0.527539, + " " + ], + [ + 0.343662, + "\u001b[32mg\u001b[39m" + ], + [ + 0.117117, + "\b\u001b[32mg\u001b[32mr\u001b[39m" + ], + [ + 0.124331, + "\b\b\u001b[1m\u001b[31mg\u001b[1m\u001b[31mr\u001b[1m\u001b[31me\u001b[0m\u001b[39m" + ], + [ + 0.726149, + "\b\b\b\u001b[0m\u001b[32mg\u001b[0m\u001b[32mr\u001b[0m\u001b[32me\u001b[32mp\u001b[39m" + ], + [ + 0.198601, + " " + ], + [ + 0.476336, + "\u001b[33m'\u001b[39m" + ], + [ + 0.392009, + "\b\u001b[33m'\u001b[33md\u001b[39m" + ], + [ + 0.627529, + "\b\u001b[33md\u001b[33me\u001b[39m" + ], + [ + 0.142332, + "\b\u001b[33me\u001b[33me\u001b[39m" + 
], + [ + 0.322681, + "\b\u001b[33me\u001b[33mr\u001b[39m" + ], + [ + 0.916328, + "\b\u001b[33mr\u001b[33m.\u001b[39m" + ], + [ + 0.50653, + "\b\u001b[33m.\u001b[33mj\u001b[39m" + ], + [ + 0.242318, + "\b\u001b[33mj\u001b[33mp\u001b[39m" + ], + [ + 0.272214, + "\b\u001b[33mp\u001b[33mg\u001b[39m" + ], + [ + 0.581098, + "\b\u001b[33mg\u001b[33m'\u001b[39m" + ], + [ + 2.559186, + "\u001b[?1l\u001b>" + ], + [ + 0.001382, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000773, + "\u001b]2;borg list /media/backup/borgdemo::backup3 | grep --color 'deer.jpg'\u0007\u001b]1;borg\u0007" + ], + [ + 0.628501, + "Enter passphrase for key /media/backup/borgdemo: " + ], + [ + 2.584332, + "\r\n" + ], + [ + 0.141205, + "-rw-rw-r-- rugk rugk 3781749 Fri, 2017-07-14 17:01:45 Wallpaper/\u001b[01;31m\u001b[Kdeer.jpg\u001b[m\u001b[K\r\n" + ], + [ + 0.054041, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000135, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 2.7e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00017, + "\u001b[?2004h" + ], + [ + 2.222435, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.269828, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.211035, + "\b\b\u001b[1m#\u001b[1m \u001b[1mO\u001b[0m\u001b[39m" + ], + [ + 0.184712, + "\b\u001b[1mO\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.374912, + "\b\u001b[1mh\u001b[1m, we found our picture. 
Now extract it:\u001b[0m\u001b[39m" + ], + [ + 1.545747, + "\u001b[?1l\u001b>" + ], + [ + 0.000418, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00063, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 3.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000135, + "\u001b[?1h\u001b=" + ], + [ + 0.000463, + "\u001b[?2004h" + ], + [ + 1.638625, + "\u001b[1m\u001b[31mm\u001b[0m\u001b[39m" + ], + [ + 0.156977, + "\b\u001b[0m\u001b[32mm\u001b[32mv\u001b[39m" + ], + [ + 0.220013, + " " + ], + [ + 0.151118, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.002944, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.668654, + "\b\b\u001b[4mr\u001b[24m\u001b[0m\u001b[24m " + ], + [ + 0.297169, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.005693, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mmv\u001b[39m \u001b[4mWallpaper\u001b[24m \u001b[4mWallpaper\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.672973, + "\b\b\u001b[4mr\u001b[24m\u001b[0m\u001b[24m \b" + ], + [ + 0.263416, + "\u001b[9D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr." 
+ ], + [ + 0.334671, + "o" + ], + [ + 0.19768, + "r" + ], + [ + 0.142283, + "i" + ], + [ + 0.17833, + "g" + ], + [ + 0.688576, + "\u001b[?1l\u001b>" + ], + [ + 0.001806, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000954, + "\u001b]2;mv -i Wallpaper Wallpaper.orig\u0007\u001b]1;mv\u0007" + ], + [ + 0.002076, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000153, + "\u001b[?2004h" + ], + [ + 1.864942, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.18048, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.143872, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.161829, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.170439, + " " + ], + [ + 0.248909, + "c" + ], + [ + 0.365319, + "\b \b" + ], + [ + 0.142233, + "e" + ], + [ + 0.157272, + "x" + ], + [ + 0.166861, + "t" + ], + [ + 0.115114, + "r" + ], + [ + 0.103674, + "a" + ], + [ + 0.102162, + "c" + ], + [ + 0.163264, + "t" + ], + [ + 0.308166, + " " + ], + [ + 1.386497, + "\u001b[4m/\u001b[24m" + ], + [ + 0.183134, + "\b\u001b[4m/\u001b[4mm\u001b[24m" + ], + [ + 0.115533, + "\b\u001b[4mm\u001b[4me\u001b[24m" + ], + [ + 0.12416, + "\b\u001b[4me\u001b[4md\u001b[24m" + ], + [ + 0.206989, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003179, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m extract \u001b[4m/media\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.241808, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003324, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m extract \u001b[4m/media/backup\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.193552, + "\u001b[?7l" + ], + [ + 2.6e-05, + "\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 
0.003368, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mborg\u001b[39m extract \u001b[4m/media/backup/borgdemo\u001b[1m\u001b[4m/\u001b[0m\u001b[24m\u001b[K" + ], + [ + 0.700774, + "\b\b\u001b[4mo\u001b[24m\u001b[0m\u001b[24m \b" + ], + [ + 1.151074, + "\u001b[22D\u001b[24m/\u001b[24mm\u001b[24me\u001b[24md\u001b[24mi\u001b[24ma\u001b[24m/\u001b[24mb\u001b[24ma\u001b[24mc\u001b[24mk\u001b[24mu\u001b[24mp\u001b[24m/\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24md\u001b[24me\u001b[24mm\u001b[24mo:" + ], + [ + 0.146222, + ":" + ], + [ + 0.579644, + "b" + ], + [ + 0.102789, + "a" + ], + [ + 0.178851, + "c" + ], + [ + 0.133936, + "k" + ], + [ + 0.124089, + "u" + ], + [ + 0.229823, + "p" + ], + [ + 0.174738, + "3" + ], + [ + 0.306821, + " " + ], + [ + 4.287483, + "\u001b[45D\u001b[39mb\u001b[39mo\u001b[39mr\u001b[39mg\u001b[41C\u001b[7mWallpaper/deer.jpg\u001b[27m" + ], + [ + 1.718396, + "\u001b[63D\u001b[32mb\u001b[32mo\u001b[32mr\u001b[32mg\u001b[39m\u001b[41C\u001b[27mW\u001b[27ma\u001b[27ml\u001b[27ml\u001b[27mp\u001b[27ma\u001b[27mp\u001b[27me\u001b[27mr\u001b[27m/\u001b[27md\u001b[27me\u001b[27me\u001b[27mr\u001b[27m.\u001b[27mj\u001b[27mp\u001b[27mg" + ], + [ + 6.4e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.001749, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000991, + "\u001b]2;borg extract /media/backup/borgdemo::backup3 Wallpaper/deer.jpg\u0007\u001b]1;borg\u0007" + ], + [ + 0.633044, + "Enter passphrase for key /media/backup/borgdemo: " + ], + [ + 2.659432, + "\r\n" + ], + [ + 0.198939, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000134, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 7.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000169, + "\u001b[?2004h" + ], + [ + 4.506682, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.287992, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.13604, + "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.132241, + 
"\b\u001b[1mA\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.115152, + "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" + ], + [ + 0.190449, + "\b\u001b[1md\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.168765, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.248816, + "\b\u001b[1mc\u001b[1mheck that it's the same:\u001b[0m\u001b[39m" + ], + [ + 1.093037, + "\u001b[?1l\u001b>" + ], + [ + 0.000401, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000745, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 7.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.0001, + "\u001b[?1h\u001b=" + ], + [ + 0.000321, + "\u001b[?2004h" + ], + [ + 1.350298, + "\u001b[32md\u001b[39m" + ], + [ + 0.181769, + "\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[0m\u001b[39m" + ], + [ + 0.148155, + "\b\b\u001b[1m\u001b[31md\u001b[1m\u001b[31mi\u001b[1m\u001b[31mf\u001b[0m\u001b[39m" + ], + [ + 0.13874, + "\b\b\b\u001b[0m\u001b[32md\u001b[0m\u001b[32mi\u001b[0m\u001b[32mf\u001b[32mf\u001b[39m" + ], + [ + 0.321772, + " " + ], + [ + 0.410311, + "-" + ], + [ + 0.160707, + "s" + ], + [ + 0.223167, + " " + ], + [ + 0.856546, + "\u001b[4mW\u001b[24m" + ], + [ + 0.184551, + "\b\u001b[4mW\u001b[4ma\u001b[24m" + ], + [ + 0.211734, + "\b\u001b[4ma\u001b[4ml\u001b[24m" + ], + [ + 0.115481, + "\b\u001b[4ml\u001b[4ml\u001b[24m" + ], + [ + 0.13804, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.007132, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 0.620064, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.004082, + "\r\r\u001b[19C" + ], + [ + 0.000148, + "\r\r\n\u001b[J\u001b[J\u001b[38;5;33mWallpaper\u001b[0m/ \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 0.83944, + "\u001b[?7l" + ], + [ + 2.4e-05, + 
"\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.003487, + "\u001b[9D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[1m/\u001b[0m" + ], + [ + 0.000166, + "\r\r\n\u001b[J\u001b[7mWallpaper/ \u001b[0m \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s Wallpaper\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.488495, + "\r\r\n" + ], + [ + 1.6e-05, + "\u001b[7mWallpaper/ \u001b[0m \r\u001b[7mWallpaper/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s Wallpaper\u001b[1m/\u001b[0m\u001b[K\r\r\n\u001b[J\u001b[A\u001b[20C" + ], + [ + 0.001959, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper\u001b[24m\u001b[K\u001b[1C" + ], + [ + 0.285593, + "\b" + ], + [ + 0.303988, + "\b\u001b[4mr\u001b[4m/\u001b[24m" + ], + [ + 0.798187, + "\b\u001b[4m/\u001b[4md\u001b[24m" + ], + [ + 0.241007, + "\b\u001b[4md\u001b[4me\u001b[24m" + ], + [ + 0.21286, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.00579, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m\u001b[1m \u001b[0m\u001b[K" + ], + [ + 1.289271, + "\b\u001b[0m \u001b[4mW\u001b[24m" + ], + [ + 0.148557, + "\b\u001b[4mW\u001b[4ma\u001b[24m" + ], + [ + 0.16621, + "\b\u001b[4ma\u001b[4ml\u001b[24m" + ], + [ + 0.097599, + "\b\u001b[4ml\u001b[4ml\u001b[24m" + ], + [ + 0.111176, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.005059, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 0.431538, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.005176, + "\r\r\u001b[38C" + ], + [ + 0.000155, + "\r\r\n\u001b[J\u001b[J\u001b[38;5;33mWallpaper\u001b[0m/ 
\u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper\u001b[24m\u001b[K" + ], + [ + 0.389092, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.004561, + "\u001b[9D\u001b[24mW\u001b[24ma\u001b[24ml\u001b[24ml\u001b[24mp\u001b[24ma\u001b[24mp\u001b[24me\u001b[24mr\u001b[1m/\u001b[0m" + ], + [ + 0.000155, + "\r\r\n\u001b[J\u001b[7mWallpaper/ \u001b[0m \u001b[J\u001b[38;5;33mWallpaper.orig\u001b[0m/\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C" + ], + [ + 1.3e-05, + "\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m Wallpaper\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.260844, + "\r\r\n" + ], + [ + 3.6e-05, + "\u001b[7mWallpaper/ \u001b[0m \r\u001b[7mWallpaper/ \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m Wallpaper\u001b[1m/\u001b[0m\u001b[K\b\u001b[0m.orig\u001b[1m/\u001b[0m" + ], + [ + 0.000163, + "\r\r\n\u001b[17C\u001b[7mWallpaper.orig/\u001b[0m\r\u001b[38;5;33mWallpaper\u001b[0m/ \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m Wallpaper.orig\u001b[1m/\u001b[0m\u001b[K" + ], + [ + 0.598634, + "\r\r\n\u001b[J\u001b[A\u001b[44C" + ], + [ + 0.002461, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m \u001b[4mWallpaper.orig\u001b[24m\u001b[K\u001b[1C" + ], + [ + 0.275896, + "\b" + ], + [ + 0.321512, + "\b\u001b[4mg\u001b[4m/\u001b[24m" + ], + [ + 1.499007, + "\b\u001b[4m/\u001b[4md\u001b[24m" + ], + [ + 0.165243, + "\b\u001b[4md\u001b[4me\u001b[24m" + ], + [ + 0.260397, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.005274, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mdiff\u001b[39m -s \u001b[4mWallpaper/deer.jpg\u001b[24m 
\u001b[4mWallpaper.orig/deer.jpg\u001b[24m\u001b[1m \u001b[0m\u001b[K" + ], + [ + 1.658125, + "\b\u001b[0m \b" + ], + [ + 1.5e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.001138, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000783, + "\u001b]2;diff -s Wallpaper/deer.jpg Wallpaper.orig/deer.jpg\u0007\u001b]1;diff\u0007" + ], + [ + 0.057035, + "Files Wallpaper/deer.jpg and Wallpaper.orig/deer.jpg are identical\r\n" + ], + [ + 0.000183, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000114, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 9.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000199, + "\u001b[?2004h" + ], + [ + 3.579542, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.624347, + "\b\u001b[0m\u001b[39m \b" + ], + [ + 0.353186, + "\u001b[?1l\u001b>" + ], + [ + 0.000351, + "\u001b[?2004l\r\r\n" + ], + [ + 0.0006, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 2.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.00013, + "\u001b[?1h\u001b=" + ], + [ + 0.000185, + "\u001b[?2004h" + ], + [ + 0.726522, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.358332, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.183839, + "\b\b\u001b[1m#\u001b[1m \u001b[1mA\u001b[0m\u001b[39m" + ], + [ + 0.150451, + "\b\u001b[1mA\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.128839, + "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" + ], + [ + 0.583652, + "\b\u001b[1md\u001b[1m,\u001b[0m\u001b[39m" + ], + [ + 0.152149, + "\b\u001b[1m,\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.240696, + "\b\u001b[1m \u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.130032, + "\b\u001b[1mo\u001b[1mf\u001b[0m\u001b[39m" + ], + [ + 0.306901, + "\b\u001b[1mf\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.181176, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.271007, + "\b\u001b[1mc\u001b[1mourse, we can also create remote repos via ssh when borg is setup\u001b[1m \u001b[1mthere. 
This command creates a new remote repo in a subdirectory called \"demo\"\u001b[1m:\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.040444, + "\u001b[?1l\u001b>" + ], + [ + 0.000423, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000711, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 6.8e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000297, + "\u001b[?2004h" + ], + [ + 1.613372, + "\u001b[1m\u001b[31mb\u001b[0m\u001b[39m" + ], + [ + 0.204618, + "\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[0m\u001b[39m" + ], + [ + 0.121257, + "\b\b\u001b[1m\u001b[31mb\u001b[1m\u001b[31mo\u001b[1m\u001b[31mr\u001b[0m\u001b[39m" + ], + [ + 0.228506, + "\b\b\b\u001b[0m\u001b[32mb\u001b[0m\u001b[32mo\u001b[0m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.469213, + " " + ], + [ + 0.23811, + "i" + ], + [ + 0.139149, + "n" + ], + [ + 0.157285, + "i" + ], + [ + 0.219101, + "t" + ], + [ + 0.389153, + " " + ], + [ + 0.633813, + "-" + ], + [ + 0.102895, + "-" + ], + [ + 0.267338, + "e" + ], + [ + 0.244036, + "n" + ], + [ + 0.303722, + "c" + ], + [ + 0.117325, + "r" + ], + [ + 0.112606, + "y" + ], + [ + 0.250891, + "p" + ], + [ + 0.258828, + "t" + ], + [ + 0.276877, + "i" + ], + [ + 0.131491, + "o" + ], + [ + 0.206852, + "n" + ], + [ + 0.966102, + "=" + ], + [ + 0.388021, + "r" + ], + [ + 0.146133, + "e" + ], + [ + 0.176939, + "p" + ], + [ + 0.139187, + "o" + ], + [ + 0.273188, + "k" + ], + [ + 0.172429, + "e" + ], + [ + 0.306306, + "y" + ], + [ + 0.851125, + " " + ], + [ + 0.868971, + "b" + ], + [ + 0.261136, + "o" + ], + [ + 0.12143, + "r" + ], + [ + 0.15507, + "g" + ], + [ + 0.186684, + "d" + ], + [ + 0.141974, + "e" + ], + [ + 0.13004, + "m" + ], + [ + 0.172673, + "o" + ], + [ + 1.041475, + "@" + ], + [ + 0.536019, + "r" + ], + [ + 0.02293, + "e" + ], + [ + 0.223755, + "m" + ], + [ + 0.152859, + "o" + ], + [ + 0.222368, + "t" + ], + [ + 0.095106, + "e" + ], + [ + 0.33914, + "s" + ], + [ + 0.213902, + "e" + ], + [ + 
0.136448, + "r" + ], + [ + 0.196228, + "v" + ], + [ + 0.171447, + "e" + ], + [ + 0.154296, + "r" + ], + [ + 1.151168, + "." + ], + [ + 0.198973, + "e" + ], + [ + 0.195428, + "x" + ], + [ + 0.163512, + "a" + ], + [ + 0.157805, + "m" + ], + [ + 0.174865, + "p" + ], + [ + 0.103133, + "l" + ], + [ + 0.145276, + "e" + ], + [ + 2.109373, + ":" + ], + [ + 0.494126, + "." + ], + [ + 0.315325, + "/" + ], + [ + 0.182218, + "d" + ], + [ + 0.138815, + "e" + ], + [ + 0.143066, + "m" + ], + [ + 0.17136, + "o" + ], + [ + 1.831712, + "\u001b[?1l\u001b>" + ], + [ + 0.001025, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000824, + "\u001b]2;borg init --encryption=repokey borgdemo@remoteserver.example:./demo\u0007\u001b]1;borg\u0007" + ], + [ + 6.069586, + "Enter new passphrase: " + ], + [ + 2.598936, + "\r\n" + ], + [ + 0.000189, + "Enter same passphrase again: " + ], + [ + 2.044707, + "\r\n" + ], + [ + 0.000198, + "Do you want your passphrase to be displayed for verification? [yN]: " + ], + [ + 1.415539, + "\r\n" + ], + [ + 1.950077, + "\r\nBy default repositories initialized with this version will produce security\r\nerrors if written to with an older version (up to and including Borg 1.0.8).\r\n\r\nIf you want to use these older versions, you can disable the check by running:\r\nborg upgrade --disable-tam 'ssh://borgdemo@remoteserver.example/./demo'\r\n\r\nSee https://borgbackup.readthedocs.io/en/stable/changes.html#pre-1-0-9-manifest-spoofing-vulnerability for details about the security implications.\r\n" + ], + [ + 0.548386, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 9.5e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000105, + "\u001b[?1h\u001b=" + ], + [ + 0.000221, + "\u001b[?2004h" + ], + [ + 0.82377, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.662248, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.610999, + "\b\b\u001b[1m#\u001b[1m \u001b[1mE\u001b[0m\u001b[39m" + ], + [ + 0.267513, + 
"\b\u001b[1mE\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.185698, + "\b\u001b[1ma\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.161855, + "\b\u001b[1ms\u001b[1my\u001b[0m\u001b[39m" + ], + [ + 0.46273, + "\b\u001b[1my\u001b[1m, isn't it? That's all you need to know for basic usage.\u001b[0m\u001b[39m" + ], + [ + 1.861984, + "\u001b[?1l\u001b>" + ], + [ + 0.001044, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001525, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 6.3e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000316, + "\u001b[?2004h" + ], + [ + 1.009133, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.240205, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.217287, + "\b\b\u001b[1m#\u001b[1m \u001b[1mI\u001b[0m\u001b[39m" + ], + [ + 0.163888, + "\b\u001b[1mI\u001b[1mf\u001b[0m\u001b[39m" + ], + [ + 0.349458, + "\b\u001b[1mf\u001b[1m you want to see more, have a look at the screencast showing the \"advanc\u001b[1me\u001b[1md usage\".\u001b[0m\u001b[39m\u001b[K" + ], + [ + 2.780664, + "\u001b[?1l\u001b>" + ], + [ + 0.000734, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000812, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 4.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000113, + "\u001b[?1h\u001b=" + ], + [ + 0.000299, + "\u001b[?2004h" + ], + [ + 1.119856, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.281915, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.244389, + "\b\b\u001b[1m#\u001b[1m \u001b[1mI\u001b[0m\u001b[39m" + ], + [ + 0.143064, + "\b\u001b[1mI\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.171731, + "\b\u001b[1mn\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.139438, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.388834, + "\b\u001b[1ma\u001b[1mny case, enjoy using borg!\u001b[0m\u001b[39m" + ], + [ + 1.502218, + "\u001b[?1l\u001b>" + ], + [ + 0.000883, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000735, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 9.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000113, + "\u001b[?1h\u001b=" + ], + [ + 0.000498, + "\u001b[?2004h" + ], + [ + 1.273251, + "\u001b[?2004l\r\r\n" + ] + ] +} diff --git a/docs/misc/asciinema/basic.sh b/docs/misc/asciinema/basic.sh new file mode 100644 index 0000000000..743a506d2a --- /dev/null +++ b/docs/misc/asciinema/basic.sh @@ -0,0 +1,53 @@ +# Here you'll see some basic commands to start working with borg. +# Note: This teaser screencast was made with borg version 1.1.0 – older or newer borg versions may behave differently. +# But let's start. + +# First of all, you can always get help: +borg help +# These are a lot of commands, so better we start with a few: +# Let's create a repo on an external drive… +borg init --encryption=repokey /media/backup/borgdemo +# This uses the repokey encryption. You may look at "borg help init" or the online doc at https://borgbackup.readthedocs.io/ for other modes. + +# So now, let's create our first (compressed) backup. +borg create --stats --progress --compression lz4 /media/backup/borgdemo::backup1 Wallpaper + +# That's nice, so far. +# So let's add a new file… +echo "new nice file" > Wallpaper/newfile.txt + + +borg create --stats --progress --compression lz4 /media/backup/borgdemo::backup2 Wallpaper + +# Wow, this was a lot faster! +# Notice the "Deduplicated size" for "This archive"! +# Borg recognized that most files did not change and deduplicated them. + +# But what happens, when we move a dir and create a new backup? +mv … + +borg create --stats --progress --compression lz4 /media/backup/borgdemo::backup3 Wallpaper + +# Still quite fast… +# But when you look at the "deduplicated file size" again, you see that borg also recognized that only the dir and not the files changed in this backup. + +# Now lets look into a repo. +borg list /media/backup/borgdemo + +# You'll see a list of all backups. 
+# You can also use the same command to look into an archive. But we better filter the output here: +borg list /media/backup/borgdemo::backup3 | grep 'deer.jpg' + +# Oh, we found our picture. Now extract it… +mv Wallpaper Wallpaper.orig +borg extract /media/backup/borgdemo::backup3 + +# And check that it's the same: +diff -s Wallpaper/deer.jpg Wallpaper.orig/deer.jpg + +# And, of course, we can also create remote repos via ssh when borg is setup there. This command creates a new remote repo in a subdirectory called "demo": +borg init --encryption=repokey borgdemo@remoteserver.example:./demo + +# Easy, isn't it? That's all you need to know for basic usage. +# If you want to see more, have a look at the screencast showing the "advanced usage". +# In any case, enjoy using borg! diff --git a/docs/misc/asciinema/install.json b/docs/misc/asciinema/install.json new file mode 100644 index 0000000000..477b1ef276 --- /dev/null +++ b/docs/misc/asciinema/install.json @@ -0,0 +1,1354 @@ +{ + "version": 1, + "width": 78, + "height": 25, + "duration": 140.275038, + "command": null, + "title": null, + "env": { + "TERM": "xterm-256color", + "SHELL": "/bin/zsh" + }, + "stdout": [ + [ + 9.1e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000108, + "\u001b[?1h\u001b=" + ], + [ + 0.000182, + "\u001b[?2004h" + ], + [ + 0.45774, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.31515, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.220208, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.121752, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.142781, + "\b\u001b[1mh\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.117367, + "\b\u001b[1mi\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.255471, + "\b\u001b[1ms\u001b[1m asciinema will show you the installation of borg as a standalone bina\u001b[1mr\u001b[1my. 
Usually you only need this if you want to have an up-to-date version of bo\u001b[1mr\u001b[1mg or no package is available for your distro/OS.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 0.563803, + "\u001b[?1l\u001b>" + ], + [ + 0.000412, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000823, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5.9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 5.2e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.00027, + "\u001b[?2004h" + ], + [ + 2.191111, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.301924, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.21419, + "\b\b\u001b[1m#\u001b[1m \u001b[1mF\u001b[0m\u001b[39m" + ], + [ + 0.117654, + "\b\u001b[1mF\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.198616, + "\b\u001b[1mi\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.101113, + "\b\u001b[1mr\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.107485, + "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.357443, + "\b\u001b[1mt\u001b[1m, we need to download the version, we'd like to install…\u001b[0m\u001b[39m" + ], + [ + 0.516614, + "\u001b[?1l\u001b>" + ], + [ + 0.000826, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000757, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 4.4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000134, + "\u001b[?1h\u001b=" + ], + [ + 0.000598, + "\u001b[?2004h" + ], + [ + 1.411874, + "\u001b[32mw\u001b[39m" + ], + [ + 0.119593, + "\b\u001b[1m\u001b[31mw\u001b[1m\u001b[31mg\u001b[0m\u001b[39m" + ], + [ + 0.13329, + "\b\b\u001b[1m\u001b[31mw\u001b[1m\u001b[31mg\u001b[1m\u001b[31me\u001b[0m\u001b[39m" + ], + [ + 0.127861, + "\b\b\b\u001b[0m\u001b[32mw\u001b[0m\u001b[32mg\u001b[0m\u001b[32me\u001b[32mt\u001b[39m" + ], + [ + 0.324708, + " -q --show-progress https://github.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64\u001b[K" + ], + [ + 0.797801, + "\u001b[?1l\u001b>" + ], + [ + 0.000964, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000631, + 
"\u001b]2;wget -q --show-progress \u0007\u001b]1;wget\u0007" + ], + [ + 1.306534, + "\rborg-linux64 0%[ ] 0 --.-KB/s " + ], + [ + 0.23185, + "\rborg-linux64 0%[ ] 24.58K 106KB/s " + ], + [ + 0.341907, + "\rborg-linux64 0%[ ] 92.58K 161KB/s " + ], + [ + 0.230021, + "\rborg-linux64 1%[ ] 160.58K 200KB/s " + ], + [ + 0.22577, + "\rborg-linux64 1%[ ] 211.58K 206KB/s " + ], + [ + 0.229246, + "\rborg-linux64 2%[ ] 279.58K 222KB/s " + ], + [ + 0.347713, + "\rborg-linux64 2%[ ] 347.58K 216KB/s " + ], + [ + 0.224636, + "\rborg-linux64 98%[================> ] 12.41M 404KB/s eta 2s " + ], + [ + 0.205977, + "\rborg-linux64 99%[================> ] 12.50M 401KB/s eta 0s " + ], + [ + 0.137036, + "\rborg-linux64 100%[=================>] 12.56M 417KB/s in 39s \r\n" + ], + [ + 0.000872, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000103, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 0.000117, + "\u001b[?1h\u001b=" + ], + [ + 0.000208, + "\u001b[?2004h" + ], + [ + 2.118269, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.266901, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.142975, + "\b\b\u001b[1m#\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.074155, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.167144, + "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" + ], + [ + 0.2241, + "\b\u001b[1md\u001b[1m do not forget the GPG signature…!\u001b[0m\u001b[39m" + ], + [ + 0.596854, + "\u001b[?1l\u001b>" + ], + [ + 0.000696, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000691, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 8.2e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000286, + "\u001b[?2004h" + ], + [ + 1.51737, + "\u001b[1m# and do not forget the GPG signature…!\u001b[0m\u001b[39m" + ], + [ + 0.314759, + "\u001b[39D\u001b[0m\u001b[32mw\u001b[0m\u001b[32mg\u001b[0m\u001b[32me\u001b[0m\u001b[32mt\u001b[39m\u001b[0m\u001b[39m 
\u001b[0m\u001b[39m-\u001b[0m\u001b[39mq\u001b[0m\u001b[39m \u001b[0m\u001b[39m-\u001b[0m\u001b[39m-\u001b[0m\u001b[39ms\u001b[0m\u001b[39mh\u001b[0m\u001b[39mo\u001b[0m\u001b[39mw\u001b[0m\u001b[39m-\u001b[0m\u001b[39mp\u001b[0m\u001b[39mr\u001b[0m\u001b[39mo\u001b[0m\u001b[39mg\u001b[0m\u001b[39mr\u001b[0m\u001b[39me\u001b[0m\u001b[39ms\u001b[0m\u001b[39ms\u001b[0m\u001b[39m \u001b[0m\u001b[39mh\u001b[0m\u001b[39mt\u001b[0m\u001b[39mt\u001b[0m\u001b[39mp\u001b[0m\u001b[39ms\u001b[0m\u001b[39m:\u001b[0m\u001b[39m/\u001b[0m\u001b[39m/\u001b[0m\u001b[39mg\u001b[0m\u001b[39mi\u001b[0m\u001b[39mt\u001b[0m\u001b[39mh\u001b[0m\u001b[39mu\u001b[0m\u001b[39mb\u001b[0m\u001b[39m.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64\u001b[K" + ], + [ + 1.043903, + "." + ], + [ + 0.207322, + "a" + ], + [ + 0.16952, + "s" + ], + [ + 0.19625, + "c" + ], + [ + 0.359073, + "\u001b[?1l\u001b>" + ], + [ + 0.001424, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000717, + "\u001b]2;wget -q --show-progress \u0007\u001b]1;wget\u0007" + ], + [ + 1.236785, + "\rborg-linux64.asc 0%[ ] 0 --.-KB/s " + ], + [ + 1.8e-05, + "\rborg-linux64.asc 100%[=================>] 819 --.-KB/s in 0s \r\n" + ], + [ + 0.00093, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 7.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 8.7e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000183, + "\u001b[?2004h" + ], + [ + 3.234458, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 1.023301, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.331266, + "\b\b\u001b[1m#\u001b[1m \u001b[1mI\u001b[0m\u001b[39m" + ], + [ + 0.166799, + "\b\u001b[1mI\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.34554, + "\b\u001b[1mn\u001b[1m this case, we have already imported the public key of a borg developer.\u001b[1m \u001b[1mSo we only need to verify it:\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.499971, + "\u001b[?1l\u001b>" + ], + [ + 0.001069, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000922, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000159, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 3.1e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000451, + "\u001b[?2004h" + ], + [ + 2.31724, + "\u001b[32mg\u001b[39m" + ], + [ + 0.151243, + "\b\u001b[32mg\u001b[32mp\u001b[39m" + ], + [ + 0.074305, + "\b\b\u001b[32mg\u001b[32mp\u001b[32mg\u001b[39m" + ], + [ + 0.315686, + " " + ], + [ + 0.345624, + "-" + ], + [ + 0.100203, + "-" + ], + [ + 0.291673, + "v" + ], + [ + 0.11497, + "e" + ], + [ + 0.183055, + "r" + ], + [ + 0.146521, + "i" + ], + [ + 0.11872, + "f" + ], + [ + 0.309865, + "y" + ], + [ + 0.346758, + " " + ], + [ + 0.264902, + "\u001b[4mb\u001b[24m" + ], + [ + 0.307683, + "\u001b[?7l" + ], + [ + 2.1e-05, + "\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.011212, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mgpg\u001b[39m --verify \u001b[4mborg-linux64\u001b[24m\u001b[K" + ], + [ + 0.577848, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.015636, + "\r\r\u001b[27C" + ], + [ + 0.000193, + "\r\r\n\u001b[J" + ], + [ + 2e-05, + "\u001b[J\u001b[0mborg-linux64 \u001b[Jborg-linux64.asc\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify \u001b[4mborg-linux64\u001b[24m\u001b[K" + ], + [ + 0.626316, + "\u001b[?7l\u001b[31m......\u001b[39m\u001b[?7h" + ], + [ + 0.012642, + "\u001b[12D\u001b[24mb\u001b[24mo\u001b[24mr\u001b[24mg\u001b[24m-\u001b[24ml\u001b[24mi\u001b[24mn\u001b[24mu\u001b[24mx\u001b[24m6\u001b[24m4" + ], + [ + 0.000154, + "\r\r\n" + ], + [ + 1.8e-05, + "\u001b[J\u001b[7mborg-linux64 \u001b[0m \u001b[Jborg-linux64.asc\u001b[J\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify borg-linux64\u001b[K" + ], + [ + 0.189964, + "\r\r\n" + ], + [ + 1.7e-05, + "\u001b[7mborg-linux64 \u001b[0m \r\u001b[7mborg-linux64 \u001b[0m \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify borg-linux64\u001b[K.asc" 
+ ], + [ + 0.000225, + "\r\r\n" + ], + [ + 1.9e-05, + "\u001b[18C\u001b[7mborg-linux64.asc\u001b[0m\rborg-linux64 \r\u001b[A\u001b[0m\u001b[27m\u001b[24m\r\u001b[2C\u001b[32mgpg\u001b[39m --verify borg-linux64.asc\u001b[K" + ], + [ + 0.866638, + "\r\r\n\u001b[J\u001b[A\u001b[31C\u001b[1m \u001b[0m" + ], + [ + 0.001241, + "\r\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[32mgpg\u001b[39m --verify \u001b[4mborg-linux64.asc\u001b[24m\u001b[1m \u001b[0m\u001b[K" + ], + [ + 0.654098, + "\b\u001b[0m \b" + ], + [ + 2.7e-05, + "\u001b[?1l\u001b>" + ], + [ + 0.001361, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000737, + "\u001b]2;gpg --verify borg-linux64.asc\u0007" + ], + [ + 2.6e-05, + "\u001b]1;gpg\u0007" + ], + [ + 0.002478, + "gpg: assuming signed data in `borg-linux64'\r\n" + ], + [ + 0.082679, + "gpg: Signature made Sun Jun 18 16:54:19 2017 CEST\r\ngpg: using RSA key 0x243ACFA951F78E01\r\n" + ], + [ + 0.003947, + "gpg: Good signature from \"Thomas Waldmann \" [ultimate]\r\ngpg: aka \"Thomas Waldmann \" [ultimate]\r\ngpg: aka \"Thomas Waldmann \" [ultimate]\r\n" + ], + [ + 2.1e-05, + "gpg: aka \"Thomas Waldmann \"" + ], + [ + 1.5e-05, + " [ultimate]\r\n" + ], + [ + 0.001743, + "Primary key fingerprint: 6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393\r\n Subkey fingerprint: 2F81 AFFB AB04 E11F E8EE 65D4 243A CFA9 51F7 8E01\r\n" + ], + [ + 0.000384, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000155, + "\u001b[?2004h" + ], + [ + 4.627219, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.225001, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.213579, + "\b\b\u001b[1m#\u001b[1m \u001b[1mO\u001b[0m\u001b[39m" + ], + [ + 0.132218, + "\b\u001b[1mO\u001b[1mk\u001b[0m\u001b[39m" + ], + [ + 0.061577, + "\b\u001b[1mk\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.154786, + "\b\u001b[1ma\u001b[1my\u001b[0m\u001b[39m" + ], + [ + 0.172921, + 
"\b\u001b[1my\u001b[1m,\u001b[0m\u001b[39m" + ], + [ + 0.648978, + "\b\u001b[1m,\u001b[1m the binary is valid!\u001b[0m\u001b[39m" + ], + [ + 0.822303, + "\u001b[?1l\u001b>" + ], + [ + 0.000388, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000681, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000113, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 4.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000252, + "\u001b[?2004h" + ], + [ + 2.048081, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.243659, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.174242, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.131485, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.109555, + "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 0.128309, + "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.163064, + "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.138953, + "\b\u001b[1mi\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.050135, + "\b\u001b[1mn\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.095385, + "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.114692, + "\b\u001b[1mt\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.155821, + "\b\u001b[1ma\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.118297, + "\b\u001b[1ml\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.165834, + "\b\u001b[1ml\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.231866, + "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.159893, + "\b\u001b[1mi\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.289328, + "\b\u001b[1mt\u001b[1m:\u001b[0m\u001b[39m" + ], + [ + 2.713706, + "\u001b[?1l\u001b>" + ], + [ + 0.000362, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000674, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 9e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 5.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000272, + "\u001b[?2004h" + ], + [ + 1.703796, + "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" + ], + [ + 0.12754, + 
"\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" + ], + [ + 0.149508, + "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" + ], + [ + 0.121616, + "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" + ], + [ + 0.321903, + " \u001b[32mcp\u001b[39m \u001b[4mborg-linux64\u001b[24m \u001b[4m/usr/local/bin/borg\u001b[24m" + ], + [ + 2.352378, + "\u001b[?1l\u001b>" + ], + [ + 0.001087, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00091, + "\u001b]2;sudo cp borg-linux64 /usr/local/bin/borg\u0007\u001b]1;cp\u0007" + ], + [ + 0.013652, + "[sudo] password for rugk: " + ], + [ + 2.992379, + "\r\n" + ], + [ + 0.031173, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 4e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.3e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000214, + "\u001b[?2004h" + ], + [ + 5.400881, + "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" + ], + [ + 0.138474, + "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" + ], + [ + 0.114266, + "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" + ], + [ + 0.098068, + "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" + ], + [ + 0.16926, + " " + ], + [ + 0.188874, + "\u001b[32mcp\u001b[39m \u001b[4mborg-linux64\u001b[24m \u001b[4m/usr/local/bin/borg\u001b[24m" + ], + [ + 0.413244, + "\u001b[34D\u001b[32mh\u001b[32mo\u001b[24m\u001b[32mw\u001b[24m\u001b[32mn\u001b[39m\u001b[24m \u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[24m:\u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[20C" + ], + [ + 1.397429, + "\u001b[?1l\u001b>" + ], + [ + 0.00132, + "\u001b[?2004l\r\r\n" + ], + [ + 0.00075, + "\u001b]2;sudo chown root:root /usr/local/bin/borg\u0007\u001b]1;chown\u0007" + ], + [ + 0.010539, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5.7e-05, + 
"\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000284, + "\u001b[?2004h" + ], + [ + 2.229436, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.213191, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.099902, + "\b\b\u001b[1m#\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.069437, + "\b\u001b[1ma\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.106463, + "\b\u001b[1mn\u001b[1md\u001b[0m\u001b[39m" + ], + [ + 0.080598, + "\b\u001b[1md\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.172381, + "\b\u001b[1m \u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.096638, + "\b\u001b[1mm\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.078606, + "\b\u001b[1ma\u001b[1mk\u001b[0m\u001b[39m" + ], + [ + 0.106382, + "\b\u001b[1mk\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.110174, + "\b\u001b[1me\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.216964, + "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.183739, + "\b\u001b[1mi\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.150872, + "\b\u001b[1mt\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.188901, + "\b\u001b[1m \u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.099651, + "\b\u001b[1me\u001b[1mx\u001b[0m\u001b[39m" + ], + [ + 0.1893, + "\b\u001b[1mx\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.187999, + "\b\u001b[1me\u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.128262, + "\b\u001b[1mc\u001b[1mu\u001b[0m\u001b[39m" + ], + [ + 0.144851, + "\b\u001b[1mu\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.091175, + "\b\u001b[1mt\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.135575, + "\b\u001b[1ma\u001b[1mb\u001b[0m\u001b[39m" + ], + [ + 0.18045, + "\b\u001b[1mb\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.110687, + "\b\u001b[1ml\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.360861, + "\b\u001b[1me\u001b[1m…\u001b[0m\u001b[39m" + ], + [ + 0.69896, + "\u001b[?1l\u001b>" + ], + [ + 0.000433, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000544, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 5.1e-05, + 
"\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000306, + "\u001b[?2004h" + ], + [ + 1.028139, + "\u001b[1m\u001b[31ms\u001b[0m\u001b[39m" + ], + [ + 0.136555, + "\b\u001b[0m\u001b[32ms\u001b[32mu\u001b[39m" + ], + [ + 0.115701, + "\b\b\u001b[1m\u001b[31ms\u001b[1m\u001b[31mu\u001b[1m\u001b[31md\u001b[0m\u001b[39m" + ], + [ + 0.151048, + "\b\b\b\u001b[0m\u001b[4m\u001b[32ms\u001b[0m\u001b[4m\u001b[32mu\u001b[0m\u001b[4m\u001b[32md\u001b[4m\u001b[32mo\u001b[24m\u001b[39m" + ], + [ + 0.276036, + " \u001b[32mchown\u001b[39m root:root \u001b[4m/usr/local/bin/borg\u001b[24m" + ], + [ + 0.284509, + "\u001b[34D\u001b[32mp\u001b[39m\u001b[39m \u001b[39m\u001b[4mb\u001b[39m\u001b[4mo\u001b[4mr\u001b[4mg\u001b[4m-\u001b[4ml\u001b[4mi\u001b[4mn\u001b[4mu\u001b[4mx\u001b[4m6\u001b[4m4\u001b[24m\u001b[20C" + ], + [ + 0.422112, + "\u001b[34D\u001b[32mh\u001b[32mo\u001b[24m\u001b[32mw\u001b[24m\u001b[32mn\u001b[39m\u001b[24m \u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[24m:\u001b[24mr\u001b[24mo\u001b[24mo\u001b[24mt\u001b[20C" + ], + [ + 0.616462, + "\u001b[33D\u001b[32mm\u001b[32mo\u001b[32md\u001b[39m 755\u001b[6P\u001b[20C \b\b\b\b\b\b" + ], + [ + 1.090337, + "\u001b[?1l\u001b>" + ], + [ + 0.00101, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000655, + "\u001b]2;sudo chmod 755 /usr/local/bin/borg\u0007" + ], + [ + 1.8e-05, + "\u001b]1;chmod\u0007" + ], + [ + 0.009932, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000124, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 5.9e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000237, + "\u001b[?2004h" + ], + [ + 3.613554, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.305561, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.451533, + "\b\b\u001b[1m#\u001b[1m \u001b[1mN\u001b[0m\u001b[39m" + ], + [ + 0.199295, + "\b\u001b[1mN\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.134017, + "\b\u001b[1mo\u001b[1mw\u001b[0m\u001b[39m" + ], + [ + 
0.232574, + "\b\u001b[1mw\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.283449, + "\b\u001b[1m \u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.156927, + "\b\u001b[1mc\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.100718, + "\b\u001b[1mh\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.145048, + "\b\u001b[1me\u001b[1mc\u001b[0m\u001b[39m" + ], + [ + 0.238223, + "\b\u001b[1mc\u001b[1mk\u001b[0m\u001b[39m" + ], + [ + 0.145393, + "\b\u001b[1mk\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.195514, + "\b\u001b[1m \u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.190153, + "\b\u001b[1mi\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 1.202922, + "\b\u001b[1mt\u001b[1m:\u001b[0m\u001b[39m" + ], + [ + 0.17572, + "\b\u001b[1m:\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.209752, + "\b\u001b[1m \u001b[1m(\u001b[0m\u001b[39m" + ], + [ + 0.266264, + "\b\u001b[1m(\u001b[1mp\u001b[0m\u001b[39m" + ], + [ + 0.136174, + "\b\u001b[1mp\u001b[1mo\u001b[0m\u001b[39m" + ], + [ + 0.136549, + "\b\u001b[1mo\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.157321, + "\b\u001b[1ms\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.134812, + "\b\u001b[1ms\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.177707, + "\b\u001b[1mi\u001b[1mb\u001b[0m\u001b[39m" + ], + [ + 0.184458, + "\b\u001b[1mb\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.104718, + "\b\u001b[1ml\u001b[1my\u001b[0m\u001b[39m" + ], + [ + 0.132476, + "\b\u001b[1my\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.14269, + "\b\u001b[1m \u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.109627, + "\b\u001b[1mn\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.150487, + "\b\u001b[1me\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.202663, + "\b\u001b[1me\u001b[1md\u001b[0m\u001b[39m" + ], + [ + 0.12975, + "\b\u001b[1md\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.095469, + "\b\u001b[1ms\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.160511, + "\b\u001b[1m \u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.149495, + "\b\u001b[1ma\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.189727, + "\b\u001b[1m \u001b[1mt\u001b[0m\u001b[39m" + 
], + [ + 0.098768, + "\b\u001b[1mt\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.125099, + "\b\u001b[1me\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.077112, + "\b\u001b[1mr\u001b[1mm\u001b[0m\u001b[39m" + ], + [ + 0.147886, + "\b\u001b[1mm\u001b[1mi\u001b[0m\u001b[39m" + ], + [ + 0.124366, + "\b\u001b[1mi\u001b[1mn\u001b[0m\u001b[39m" + ], + [ + 0.088118, + "\b\u001b[1mn\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.116281, + "\b\u001b[1ma\u001b[1ml\u001b[0m\u001b[39m" + ], + [ + 0.146487, + "\b\u001b[1ml\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.156764, + "\b\u001b[1m \u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.195688, + "\b\u001b[1mr\u001b[1me\u001b[0m\u001b[39m" + ], + [ + 0.40621, + "\b\u001b[1me\u001b[1ms\u001b[0m\u001b[39m" + ], + [ + 0.263813, + "\b\u001b[1ms\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.087475, + "\b\u001b[1mt\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.090176, + "\b\u001b[1ma\u001b[1mr\u001b[0m\u001b[39m" + ], + [ + 0.12059, + "\b\u001b[1mr\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.232423, + "\b\u001b[1mt\u001b[1m)\u001b[0m\u001b[39m" + ], + [ + 1.383975, + "\u001b[?1l\u001b>" + ], + [ + 0.000692, + "\u001b[?2004l\r\r\n" + ], + [ + 0.001339, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000147, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.6e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000558, + "\u001b[?2004h" + ], + [ + 1.045406, + "\u001b[4mb\u001b[24m" + ], + [ + 0.163217, + "\b\u001b[4mb\u001b[4mo\u001b[24m" + ], + [ + 0.131464, + "\b\b\u001b[4mb\u001b[4mo\u001b[4mr\u001b[24m" + ], + [ + 0.103279, + "\b\b\b\u001b[24m\u001b[32mb\u001b[24m\u001b[32mo\u001b[24m\u001b[32mr\u001b[32mg\u001b[39m" + ], + [ + 0.181118, + " " + ], + [ + 0.440449, + "-" + ], + [ + 0.186299, + "V" + ], + [ + 0.522054, + "\u001b[?1l\u001b>" + ], + [ + 0.000643, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000967, + "\u001b]2;borg -V\u0007\u001b]1;borg\u0007" + ], + [ + 0.426128, + "borg 1.1.0b6\r\n" + ], + [ + 0.040916, + 
"\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 0.000101, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.5e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000176, + "\u001b[?2004h" + ], + [ + 1.92655, + "\u001b[1m#\u001b[0m\u001b[39m" + ], + [ + 0.247681, + "\b\u001b[1m#\u001b[1m \u001b[0m\u001b[39m" + ], + [ + 0.233391, + "\b\b\u001b[1m#\u001b[1m \u001b[1mT\u001b[0m\u001b[39m" + ], + [ + 0.127191, + "\b\u001b[1mT\u001b[1mh\u001b[0m\u001b[39m" + ], + [ + 0.023053, + "\b\u001b[1mh\u001b[1ma\u001b[0m\u001b[39m" + ], + [ + 0.155649, + "\b\u001b[1ma\u001b[1mt\u001b[0m\u001b[39m" + ], + [ + 0.3483, + "\b\u001b[1mt\u001b[1m's it! Check out the other screencasts to see how to actually use borg\u001b[1mb\u001b[1mackup.\u001b[0m\u001b[39m\u001b[K" + ], + [ + 1.701253, + "\u001b[?1l\u001b>" + ], + [ + 0.000707, + "\u001b[?2004l\r\r\n" + ], + [ + 0.000682, + "\u001b[1m\u001b[7m%\u001b[27m\u001b[1m\u001b[0m \r \r" + ], + [ + 1.1e-05, + "\u001b]1;..lder/borgdemo\u0007" + ], + [ + 5.7e-05, + "\r\u001b[0m\u001b[27m\u001b[24m\u001b[J$ \u001b[K" + ], + [ + 6.8e-05, + "\u001b[?1h\u001b=" + ], + [ + 0.000284, + "\u001b[?2004h" + ], + [ + 1.579085, + "\u001b[?2004l\r\r\n" + ] + ] +} diff --git a/docs/misc/asciinema/install.sh b/docs/misc/asciinema/install.sh new file mode 100644 index 0000000000..3ade47c889 --- /dev/null +++ b/docs/misc/asciinema/install.sh @@ -0,0 +1,21 @@ +# This asciinema will show you the installation of borg as a standalone binary. Usually you only need this if you want to have an up-to-date version of borg or no package is available for your distro/OS. + +# First, we need to download the version, we'd like to install… +wget -q --show-progress https://github.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64 +# and do not forget the GPG signature…! 
+wget -q --show-progress https://github.com/borgbackup/borg/releases/download/1.1.0b6/borg-linux64.asc + +# In this case, we have already imported the public key of a borg developer. So we only need to verify it: +gpg --verify borg-linux64.asc +# Okay, the binary is valid! + +# Now install it: +sudo cp borg-linux64 /usr/local/bin/borg +sudo chown root:root /usr/local/bin/borg +# and make it executable… +sudo chmod 755 /usr/local/bin/borg + +# Now check it: (possibly needs a terminal restart) +borg -V + +# That's it! Check out the other screencasts to see how to actually use borgbackup. diff --git a/docs/misc/asciinema/install_and_basics.json b/docs/misc/asciinema/install_and_basics.json deleted file mode 100644 index a50eb35f1a..0000000000 --- a/docs/misc/asciinema/install_and_basics.json +++ /dev/null @@ -1,5550 +0,0 @@ -{ - "version": 1, - "width": 80, - "height": 24, - "duration": 332.0, - "command": "/bin/bash", - "title": "borgbackup - installation and basic usage", - "env": { - "TERM": "xterm", - "SHELL": "/bin/bash" - }, - "stdout": [ - [ - 0.083341, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.349103, - "#" - ], - [ - 0.338948, - " " - ], - [ - 0.185424, - "b" - ], - [ - 0.142971, - "o" - ], - [ - 0.091227, - "r" - ], - [ - 0.092867, - "g" - ], - [ - 0.222552, - "b" - ], - [ - 0.114706, - "a" - ], - [ - 0.125044, - "c" - ], - [ - 0.144755, - "k" - ], - [ - 0.241044, - "u" - ], - [ - 0.243681, - "p" - ], - [ - 0.265888, - " " - ], - [ - 0.345247, - "-" - ], - [ - 0.251918, - " " - ], - [ - 0.233420, - "i" - ], - [ - 0.078609, - "n" - ], - [ - 0.076809, - "s" - ], - [ - 0.070225, - "t" - ], - [ - 0.148413, - "a" - ], - [ - 0.077403, - "l" - ], - [ - 0.139884, - "l" - ], - [ - 0.084807, - "a" - ], - [ - 0.138823, - "t" - ], - [ - 0.068185, - "i" - ], - [ - 0.170422, - "o" - ], - [ - 0.161091, - "n" - ], - [ - 0.169247, - " " - ], - [ - 0.110722, - "a" - ], - [ - 0.113785, - "n" - ], - [ - 0.397895, - "d" - ], - [ - 
0.305048, - " " - ], - [ - 0.211476, - "b" - ], - [ - 0.109865, - "a" - ], - [ - 0.230634, - "s" - ], - [ - 0.277915, - "i" - ], - [ - 0.206167, - "c" - ], - [ - 0.145265, - " " - ], - [ - 0.219619, - "u" - ], - [ - 0.139989, - "s" - ], - [ - 0.180240, - "a" - ], - [ - 0.200391, - "g" - ], - [ - 0.116961, - "e" - ], - [ - 0.172074, - "\r\n" - ], - [ - 0.000449, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.620909, - "#" - ], - [ - 0.217833, - " " - ], - [ - 0.592920, - "I" - ], - [ - 0.166726, - " " - ], - [ - 0.161953, - "h" - ], - [ - 0.072501, - "a" - ], - [ - 0.170951, - "v" - ], - [ - 0.154067, - "e" - ], - [ - 0.110535, - " " - ], - [ - 0.155235, - "a" - ], - [ - 0.130825, - "l" - ], - [ - 0.111834, - "r" - ], - [ - 0.142378, - "e" - ], - [ - 0.165867, - "a" - ], - [ - 0.062556, - "d" - ], - [ - 0.091778, - "y" - ], - [ - 0.216280, - " " - ], - [ - 0.169501, - "d" - ], - [ - 0.198240, - "o" - ], - [ - 0.092373, - "w" - ], - [ - 0.143405, - "n" - ], - [ - 0.207324, - "l" - ], - [ - 0.164248, - "o" - ], - [ - 0.088481, - "a" - ], - [ - 0.129191, - "d" - ], - [ - 0.179234, - "e" - ], - [ - 0.189248, - "d" - ], - [ - 0.145203, - " " - ], - [ - 0.221625, - "t" - ], - [ - 0.100064, - "h" - ], - [ - 0.133349, - "e" - ], - [ - 0.066501, - " " - ], - [ - 0.187004, - "f" - ], - [ - 0.142461, - "i" - ], - [ - 0.204723, - "l" - ], - [ - 0.068716, - "e" - ], - [ - 0.237576, - "s" - ], - [ - 0.128085, - ":" - ], - [ - 0.242282, - "\r\n" - ], - [ - 0.000327, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 1.796834, - "l" - ], - [ - 0.092545, - "s" - ], - [ - 0.210322, - " " - ], - [ - 0.189710, - "-" - ], - [ - 0.215532, - "l" - ], - [ - 0.852863, - "\r\n" - ], - [ - 0.002104, - "total 10620\r\n" - ], - [ - 0.000040, - "-rw-rw-r-- 1 tw tw 10869049 Oct 24 22:11 borg-linux64" - ], - [ - 0.000007, - "\r\n" - ], - [ - 0.000019, - "-rw-rw-r-- 1 tw tw 819 Oct 24 22:11 borg-linux64.asc\r\n" - ], - 
[ - 0.000431, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.513172, - "#" - ], - [ - 0.284059, - " " - ], - [ - 0.330931, - "b" - ], - [ - 0.118806, - "i" - ], - [ - 0.100553, - "n" - ], - [ - 0.259930, - "a" - ], - [ - 0.106715, - "r" - ], - [ - 0.276545, - "y" - ], - [ - 0.126132, - " " - ], - [ - 0.379724, - "+" - ], - [ - 0.199249, - " " - ], - [ - 0.295913, - "G" - ], - [ - 0.108970, - "P" - ], - [ - 0.080480, - "G" - ], - [ - 0.349293, - " " - ], - [ - 0.236785, - "s" - ], - [ - 0.105197, - "i" - ], - [ - 0.289951, - "g" - ], - [ - 0.351385, - "n" - ], - [ - 0.282003, - "a" - ], - [ - 0.206591, - "t" - ], - [ - 0.163963, - "u" - ], - [ - 0.082416, - "r" - ], - [ - 0.125432, - "e" - ], - [ - 0.369988, - "\r\n" - ], - [ - 0.000341, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.889617, - "#" - ], - [ - 0.226974, - " " - ], - [ - 0.218497, - "l" - ], - [ - 0.134545, - "e" - ], - [ - 0.103159, - "t" - ], - [ - 0.711682, - "'" - ], - [ - 0.185463, - "s" - ], - [ - 0.162130, - " " - ], - [ - 0.166049, - "v" - ], - [ - 0.183069, - "e" - ], - [ - 0.099764, - "r" - ], - [ - 0.234211, - "i" - ], - [ - 0.854328, - "f" - ], - [ - 0.203758, - "y" - ], - [ - 0.166681, - " " - ], - [ - 0.216715, - "t" - ], - [ - 0.560064, - "h" - ], - [ - 0.151837, - "a" - ], - [ - 0.194509, - "t" - ], - [ - 0.119665, - " " - ], - [ - 0.141089, - "t" - ], - [ - 0.096803, - "h" - ], - [ - 0.104718, - "e" - ], - [ - 0.106761, - " " - ], - [ - 0.229401, - "b" - ], - [ - 0.213802, - "i" - ], - [ - 0.075481, - "n" - ], - [ - 0.138720, - "a" - ], - [ - 0.062411, - "r" - ], - [ - 0.292719, - "y" - ], - [ - 0.482737, - " " - ], - [ - 0.211595, - "i" - ], - [ - 0.110964, - "s" - ], - [ - 0.102100, - " " - ], - [ - 0.143380, - "v" - ], - [ - 0.189214, - "a" - ], - [ - 0.099337, - "l" - ], - [ - 0.172757, - "i" - ], - [ - 0.082456, - "d" - ], - [ - 0.177514, - ":" - ], - [ - 0.622492, - "\r\n" - ], - [ - 0.000313, - 
"\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 2.000000, - "g" - ], - [ - 0.261924, - "p" - ], - [ - 0.108570, - "g" - ], - [ - 0.247315, - " " - ], - [ - 0.277162, - "-" - ], - [ - 0.141397, - "-" - ], - [ - 0.143255, - "v" - ], - [ - 0.162858, - "e" - ], - [ - 0.040051, - "r" - ], - [ - 0.105941, - "i" - ], - [ - 0.144872, - "f" - ], - [ - 0.306497, - "y" - ], - [ - 0.468271, - " " - ], - [ - 2.000000, - "b" - ], - [ - 0.119390, - "o" - ], - [ - 0.463137, - "\u0007" - ], - [ - 0.000095, - "rg-linux64" - ], - [ - 0.341519, - "." - ], - [ - 0.146977, - "asc " - ], - [ - 0.186292, - "\r\n" - ], - [ - 0.100648, - "gpg: Signature made Wed 07 Oct 2015 02:41:38 PM CEST\r\n" - ], - [ - 0.000011, - "gpg: using RSA key 243ACFA951F78E01\r\n" - ], - [ - 0.006906, - "gpg: Good signature from \"Thomas Waldmann \u003ctw@waldmann-edv.de\u003e\"" - ], - [ - 0.000033, - "\r\ngpg: aka \"Thomas Waldmann \u003ctw-public@gmx.de\u003e\"\r\ngpg: aka \"Thomas Waldmann \u003ctwaldmann@thinkmo.de\u003e\"\r\n" - ], - [ - 0.000018, - "gpg: aka \"Thomas Waldmann \u003cthomas.j.waldmann@gmail.com\u003e\"\r\n" - ], - [ - 0.003077, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 2.000000, - "#" - ], - [ - 0.241501, - " " - ], - [ - 0.186571, - "e" - ], - [ - 0.214388, - "v" - ], - [ - 0.157101, - "e" - ], - [ - 0.042348, - "r" - ], - [ - 0.253261, - "y" - ], - [ - 0.254356, - "t" - ], - [ - 0.094622, - "h" - ], - [ - 0.213972, - "i" - ], - [ - 0.084853, - "n" - ], - [ - 0.084920, - "g" - ], - [ - 0.178519, - " " - ], - [ - 0.256151, - "o" - ], - [ - 0.217918, - "k" - ], - [ - 0.153899, - "!" 
- ], - [ - 0.246211, - "\r\n" - ], - [ - 0.000330, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 2.000000, - "#" - ], - [ - 0.288008, - " " - ], - [ - 0.232836, - "i" - ], - [ - 0.055326, - "n" - ], - [ - 0.142978, - "s" - ], - [ - 0.080599, - "t" - ], - [ - 0.139018, - "a" - ], - [ - 0.111052, - "l" - ], - [ - 0.132419, - "l" - ], - [ - 0.169037, - " " - ], - [ - 0.117036, - "t" - ], - [ - 0.092749, - "h" - ], - [ - 0.124768, - "e" - ], - [ - 0.088888, - " " - ], - [ - 0.184118, - "b" - ], - [ - 0.182336, - "i" - ], - [ - 0.075466, - "n" - ], - [ - 0.085516, - "a" - ], - [ - 0.060363, - "r" - ], - [ - 0.843225, - "y" - ], - [ - 0.209758, - " " - ], - [ - 0.168892, - "a" - ], - [ - 0.151126, - "s" - ], - [ - 0.127487, - " " - ], - [ - 0.300923, - "\"" - ], - [ - 0.217060, - "b" - ], - [ - 0.221579, - "o" - ], - [ - 0.047775, - "r" - ], - [ - 0.107202, - "g" - ], - [ - 0.438939, - "\"" - ], - [ - 0.253153, - ":" - ], - [ - 0.617823, - "\r\n" - ], - [ - 0.000326, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.816740, - "c" - ], - [ - 0.168734, - "p" - ], - [ - 0.230846, - " " - ], - [ - 0.299588, - "b" - ], - [ - 0.121082, - "o" - ], - [ - 0.214148, - "\u0007" - ], - [ - 0.000011, - "rg-linux64" - ], - [ - 0.331736, - " " - ], - [ - 0.812264, - "~" - ], - [ - 0.518926, - "/" - ], - [ - 0.233797, - "b" - ], - [ - 0.214141, - "i" - ], - [ - 0.098062, - "n" - ], - [ - 0.607725, - "/" - ], - [ - 0.566434, - "b" - ], - [ - 0.145886, - "o" - ], - [ - 0.113081, - "r" - ], - [ - 0.068870, - "g" - ], - [ - 0.851794, - "\r\n" - ], - [ - 0.012632, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 2.000000, - "#" - ], - [ - 0.269926, - " " - ], - [ - 0.208575, - "m" - ], - [ - 0.135192, - "a" - ], - [ - 0.119543, - "k" - ], - [ - 0.080873, - "e" - ], - [ - 0.156871, - " " - ], - [ - 0.197124, - "i" - ], - [ - 0.078784, - "t" - ], - [ - 0.142373, - " " - ], - 
[ - 0.189080, - "e" - ], - [ - 0.232597, - "x" - ], - [ - 0.170105, - "e" - ], - [ - 0.132039, - "c" - ], - [ - 0.230568, - "u" - ], - [ - 0.086573, - "t" - ], - [ - 0.255047, - "a" - ], - [ - 0.231478, - "b" - ], - [ - 0.283723, - "l" - ], - [ - 0.112987, - "e" - ], - [ - 0.518611, - ":" - ], - [ - 0.459423, - "\r\n" - ], - [ - 0.000822, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.353739, - "c" - ], - [ - 0.114161, - "h" - ], - [ - 0.268562, - "m" - ], - [ - 0.179085, - "o" - ], - [ - 0.145360, - "d" - ], - [ - 0.075599, - " " - ], - [ - 0.773964, - "+" - ], - [ - 0.113699, - "x" - ], - [ - 0.187579, - " " - ], - [ - 0.381391, - "~" - ], - [ - 0.512520, - "/" - ], - [ - 0.231090, - "b" - ], - [ - 0.197636, - "i" - ], - [ - 0.101238, - "n" - ], - [ - 0.341295, - "/" - ], - [ - 0.306047, - "b" - ], - [ - 0.106898, - "o" - ], - [ - 0.233773, - "rg " - ], - [ - 0.519336, - "\r\n" - ], - [ - 0.001408, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 2.000000, - "#" - ], - [ - 0.247104, - " " - ], - [ - 0.218717, - "i" - ], - [ - 0.067769, - "n" - ], - [ - 0.139583, - "s" - ], - [ - 0.092034, - "t" - ], - [ - 0.152729, - "a" - ], - [ - 0.083844, - "l" - ], - [ - 0.145806, - "l" - ], - [ - 0.120879, - "a" - ], - [ - 0.164967, - "t" - ], - [ - 0.065308, - "i" - ], - [ - 0.816983, - "o" - ], - [ - 0.231669, - "n" - ], - [ - 0.185168, - " " - ], - [ - 0.125214, - "d" - ], - [ - 0.112630, - "o" - ], - [ - 0.068650, - "n" - ], - [ - 0.108386, - "e" - ], - [ - 0.563031, - "!" 
- ], - [ - 2.000000, - "\r\n" - ], - [ - 0.000365, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.347093, - "#" - ], - [ - 0.262764, - " " - ], - [ - 0.191568, - "l" - ], - [ - 0.086614, - "e" - ], - [ - 0.110365, - "t" - ], - [ - 0.707057, - "'" - ], - [ - 0.220060, - "s" - ], - [ - 0.181690, - " " - ], - [ - 0.128039, - "c" - ], - [ - 0.176264, - "r" - ], - [ - 0.171208, - "e" - ], - [ - 0.199371, - "a" - ], - [ - 0.161622, - "t" - ], - [ - 0.145989, - "e" - ], - [ - 0.187920, - " " - ], - [ - 0.734653, - "a" - ], - [ - 0.185812, - " " - ], - [ - 0.270851, - "r" - ], - [ - 0.120000, - "e" - ], - [ - 0.161097, - "p" - ], - [ - 0.179813, - "o" - ], - [ - 0.170557, - "s" - ], - [ - 0.145457, - "i" - ], - [ - 0.165200, - "t" - ], - [ - 0.135578, - "o" - ], - [ - 0.130363, - "r" - ], - [ - 0.461631, - "y" - ], - [ - 0.303047, - ":" - ], - [ - 0.955198, - "\r\n" - ], - [ - 0.000300, - "\u001b]0;tw@tux: ~/borg/demo/download\u0007tw@tux:~/borg/demo/download$ " - ], - [ - 0.301237, - "c" - ], - [ - 0.084267, - "d" - ], - [ - 0.155241, - " " - ], - [ - 0.813751, - "." - ], - [ - 0.157147, - "." - ], - [ - 0.573720, - "\r\n" - ], - [ - 0.000508, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.225463, - "b" - ], - [ - 0.274841, - "o" - ], - [ - 0.125292, - "r" - ], - [ - 0.083313, - "g" - ], - [ - 0.088596, - " " - ], - [ - 0.231502, - "i" - ], - [ - 0.062726, - "n" - ], - [ - 0.144877, - "i" - ], - [ - 0.112508, - "t" - ], - [ - 0.313489, - " " - ], - [ - 0.298944, - "r" - ], - [ - 0.216556, - "e" - ], - [ - 0.180484, - "p" - ], - [ - 0.204141, - "o" - ], - [ - 0.682782, - "\r\n" - ], - [ - 0.352828, - "Initializing repository at \"repo\"\r\n" - ], - [ - 0.001407, - "Encryption NOT enabled.\r\nUse the \"--encryption=repokey|keyfile|passphrase\" to enable encryption." - ], - [ - 0.000009, - "\r\n" - ], - [ - 0.008492, - "Synchronizing chunks cache..." 
- ], - [ - 0.000009, - "\r\n" - ], - [ - 0.000030, - "Archives: 0, w/ cached Idx: 0, w/ outdated Idx: 0, w/o cached Idx: 0." - ], - [ - 0.000004, - "\r\n" - ], - [ - 0.000024, - "Done." - ], - [ - 0.000004, - "\r\n" - ], - [ - 0.027827, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.988184, - "#" - ], - [ - 0.248844, - " " - ], - [ - 0.199486, - "l" - ], - [ - 0.104455, - "e" - ], - [ - 0.127960, - "t" - ], - [ - 0.484976, - "'" - ], - [ - 0.186103, - "s" - ], - [ - 0.151763, - " " - ], - [ - 0.177456, - "c" - ], - [ - 0.178972, - "r" - ], - [ - 0.183533, - "e" - ], - [ - 0.192725, - "a" - ], - [ - 0.146352, - "t" - ], - [ - 0.156199, - "e" - ], - [ - 0.232699, - " " - ], - [ - 0.513490, - "o" - ], - [ - 0.229828, - "u" - ], - [ - 0.104744, - "r" - ], - [ - 0.115068, - " " - ], - [ - 0.201439, - "f" - ], - [ - 0.333315, - "i" - ], - [ - 0.209070, - "r" - ], - [ - 0.259194, - "s" - ], - [ - 0.076346, - "t" - ], - [ - 0.125673, - " " - ], - [ - 0.198575, - "b" - ], - [ - 0.089009, - "a" - ], - [ - 0.238307, - "c" - ], - [ - 0.105568, - "k" - ], - [ - 0.254971, - "u" - ], - [ - 0.318094, - "p" - ], - [ - 0.690770, - ":" - ], - [ - 0.580155, - "\r\n" - ], - [ - 0.000308, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.603046, - "b" - ], - [ - 0.104492, - "o" - ], - [ - 0.148182, - "r" - ], - [ - 0.087024, - "g" - ], - [ - 0.176897, - " " - ], - [ - 0.183168, - "c" - ], - [ - 0.185325, - "r" - ], - [ - 0.183347, - "e" - ], - [ - 0.182868, - "a" - ], - [ - 0.170600, - "t" - ], - [ - 0.137005, - "e" - ], - [ - 0.164357, - " " - ], - [ - 0.427028, - "-" - ], - [ - 0.147791, - "-" - ], - [ - 0.440101, - "s" - ], - [ - 0.177193, - "t" - ], - [ - 0.203817, - "a" - ], - [ - 0.217150, - "t" - ], - [ - 0.229771, - "s" - ], - [ - 0.191220, - " " - ], - [ - 0.269939, - "-" - ], - [ - 0.145163, - "-" - ], - [ - 0.450053, - "p" - ], - [ - 0.165194, - "r" - ], - [ - 0.044264, - "o" - ], - [ - 0.204568, - "g" - ], - [ - 0.104759, - "r" 
- ], - [ - 0.213137, - "e" - ], - [ - 0.216596, - "s" - ], - [ - 0.163238, - "s" - ], - [ - 0.241084, - " " - ], - [ - 0.300727, - "-" - ], - [ - 0.149156, - "-" - ], - [ - 0.259608, - "c" - ], - [ - 0.120930, - "o" - ], - [ - 0.098838, - "m" - ], - [ - 0.234615, - "p" - ], - [ - 0.084600, - "r" - ], - [ - 0.166072, - "e" - ], - [ - 0.185576, - "s" - ], - [ - 0.159984, - "s" - ], - [ - 0.122793, - "i" - ], - [ - 0.180423, - "o" - ], - [ - 0.196311, - "n" - ], - [ - 0.181682, - " " - ], - [ - 0.242129, - "l" - ], - [ - 0.842020, - "z" - ], - [ - 0.707941, - "4" - ], - [ - 0.180354, - " " - ], - [ - 0.419080, - "r" - ], - [ - 0.189076, - "e" - ], - [ - 0.172527, - "p" - ], - [ - 0.154922, - "o" - ], - [ - 0.728059, - ":" - ], - [ - 0.147089, - ":" - ], - [ - 0.396117, - "b" - ], - [ - 0.090233, - "a" - ], - [ - 0.199537, - "c" - ], - [ - 0.084686, - "k" - ], - [ - 0.278049, - "u \r" - ], - [ - 0.268438, - "p" - ], - [ - 0.491592, - "1" - ], - [ - 0.508588, - " " - ], - [ - 0.174143, - "d" - ], - [ - 0.175430, - "a" - ], - [ - 0.166841, - "t" - ], - [ - 0.127029, - "a" - ], - [ - 0.380593, - "\r\n" - ], - [ - 0.557518, - " 2.68 MB O 1.25 MB C 1.25 MB D data/linux-4.1.8/Doc...ia/v4l/func-read.xml\r" - ], - [ - 0.200102, - " 5.37 MB O 2.46 MB C 2.46 MB D data/linux-4.1.8/Documentation/backlight \r" - ], - [ - 0.200342, - " 6.99 MB O 3.36 MB C 3.36 MB D data/linux-4.1.8/Doc...rm_big_little_dt.txt\r" - ], - [ - 0.200137, - " 7.83 MB O 3.87 MB C 3.87 MB D data/linux-4.1.8/Doc...s/mtd/atmel-nand.txt\r" - ], - [ - 0.200271, - " 8.77 MB O 4.41 MB C 4.41 MB D data/linux-4.1.8/Doc...ngs/soc/fsl/qman.txt\r" - ], - [ - 0.200577, - " 9.99 MB O 5.12 MB C 5.12 MB D data/linux-4.1.8/Doc...ching/cachefiles.txt\r" - ], - [ - 0.200033, - " 12.11 MB O 6.34 MB C 6.33 MB D data/linux-4.1.8/Doc...infiniband/ipoib.txt\r" - ], - [ - 0.200272, - " 15.27 MB O 8.08 MB C 8.08 MB D data/linux-4.1.8/Doc.../networking/team.txt\r" - ], - [ - 0.200072, - " 18.22 MB O 9.72 MB C 9.71 MB D 
data/linux-4.1.8/Doc...tation/sysctl/vm.txt\r" - ], - [ - 0.202107, - " 21.05 MB O 11.22 MB C 11.21 MB D data/linux-4.1.8/MAINTAINERS \r" - ], - [ - 0.200251, - " 23.04 MB O 12.20 MB C 12.20 MB D data/linux-4.1.8/arc...de/uapi/asm/unistd.h\r" - ], - [ - 0.200450, - " 25.45 MB O 13.17 MB C 13.17 MB D data/linux-4.1.8/arc.../boot/dts/imx23.dtsi\r" - ], - [ - 0.200093, - " 27.65 MB O 14.01 MB C 14.00 MB D data/linux-4.1.8/arc...omap3-overo-tobi.dts\r" - ], - [ - 0.200314, - " 30.26 MB O 14.89 MB C 14.89 MB D data/linux-4.1.8/arc...ot/dts/tps65910.dtsi\r" - ], - [ - 0.200003, - " 31.90 MB O 15.63 MB C 15.63 MB D data/linux-4.1.8/arc...include/asm/probes.h\r" - ], - [ - 0.200493, - " 34.66 MB O 16.84 MB C 16.83 MB D data/linux-4.1.8/arc...i/include/mach/psc.h\r" - ], - [ - 0.200675, - " 36.62 MB O 17.70 MB C 17.70 MB D data/linux-4.1.8/arc...mach-ixp4xx/common.c\r" - ], - [ - 0.200307, - " 38.40 MB O 18.54 MB C 18.53 MB D data/linux-4.1.8/arch/arm/mach-omap2/cm.h \r" - ], - [ - 0.200254, - " 41.29 MB O 19.63 MB C 19.63 MB D data/linux-4.1.8/arch/arm/mach-pxa/idp.c \r" - ], - [ - 0.219493, - " 43.57 MB O 20.67 MB C 20.66 MB D data/linux-4.1.8/arc...bile/clock-r8a7778.c\r" - ], - [ - 0.200451, - " 45.55 MB O 21.59 MB C 21.59 MB D data/linux-4.1.8/arc...m/plat-samsung/adc.c\r" - ], - [ - 0.200370, - " 47.50 MB O 22.51 MB C 22.51 MB D data/linux-4.1.8/arch/arm64/lib/memmove.S \r" - ], - [ - 0.200686, - " 49.21 MB O 23.33 MB C 23.32 MB D data/linux-4.1.8/arc...ckfin/kernel/trace.c\r" - ], - [ - 0.200393, - " 53.22 MB O 24.51 MB C 24.50 MB D data/linux-4.1.8/arch/c6x/include/asm/soc.h\r" - ], - [ - 0.200371, - " 56.19 MB O 25.50 MB C 25.49 MB D data/linux-4.1.8/arc...op/iop_sw_cpu_defs.h\r" - ], - [ - 0.200450, - " 57.84 MB O 26.17 MB C 26.14 MB D data/linux-4.1.8/arc...include/asm/vm_mmu.h\r" - ], - [ - 0.200573, - " 60.21 MB O 27.27 MB C 27.25 MB D data/linux-4.1.8/arch/ia64/kernel/time.c \r" - ], - [ - 0.200222, - " 62.31 MB O 28.18 MB C 28.15 MB D 
data/linux-4.1.8/arc.../coldfire/sltimers.c\r" - ], - [ - 0.200756, - " 67.09 MB O 29.98 MB C 29.90 MB D data/linux-4.1.8/arc...8k/include/asm/tlb.h\r" - ], - [ - 0.200716, - " 68.75 MB O 30.80 MB C 30.72 MB D data/linux-4.1.8/arc...ude/uapi/asm/fcntl.h\r" - ], - [ - 0.200734, - " 70.69 MB O 31.67 MB C 31.59 MB D data/linux-4.1.8/arc...figs/malta_defconfig\r" - ], - [ - 0.200198, - " 72.12 MB O 32.34 MB C 32.26 MB D data/linux-4.1.8/arc...de/asm/mc146818rtc.h\r" - ], - [ - 0.200446, - " 76.01 MB O 33.45 MB C 33.37 MB D data/linux-4.1.8/arch/mips/jazz/jazzdma.c \r" - ], - [ - 0.200111, - " 78.19 MB O 34.46 MB C 34.38 MB D data/linux-4.1.8/arc...tlogic/common/time.c\r" - ], - [ - 0.200191, - " 79.84 MB O 35.30 MB C 35.21 MB D data/linux-4.1.8/arc...de/uapi/asm/msgbuf.h\r" - ], - [ - 0.200421, - " 81.35 MB O 36.07 MB C 35.99 MB D data/linux-4.1.8/arc...sc/include/asm/rtc.h\r" - ], - [ - 0.200090, - " 83.49 MB O 37.03 MB C 36.95 MB D data/linux-4.1.8/arc...fsl/qoriq-dma-1.dtsi\r" - ], - [ - 0.200331, - " 85.13 MB O 37.80 MB C 37.72 MB D data/linux-4.1.8/arc...pc836x_rdk_defconfig\r" - ], - [ - 0.200114, - " 87.04 MB O 38.71 MB C 38.63 MB D data/linux-4.1.8/arc...ude/uapi/asm/nvram.h\r" - ], - [ - 0.200280, - " 90.24 MB O 40.19 MB C 40.11 MB D data/linux-4.1.8/arc...pc/math-emu/mtfsfi.c\r" - ], - [ - 0.216796, - " 92.68 MB O 41.41 MB C 41.33 MB D data/linux-4.1.8/arc...rms/powermac/nvram.c\r" - ], - [ - 0.200198, - " 95.32 MB O 42.60 MB C 42.52 MB D data/linux-4.1.8/arc...nclude/asm/pgtable.h\r" - ], - [ - 0.200304, - " 97.31 MB O 43.50 MB C 43.42 MB D data/linux-4.1.8/arc...mach-dreamcast/irq.c\r" - ], - [ - 0.200328, - " 99.46 MB O 44.41 MB C 44.33 MB D data/linux-4.1.8/arc...artner-jet-setup.txt\r" - ], - [ - 0.200102, - "101.28 MB O 45.25 MB C 45.16 MB D data/linux-4.1.8/arc...rc/include/asm/ecc.h\r" - ], - [ - 0.200253, - "103.53 MB O 46.27 MB C 46.19 MB D data/linux-4.1.8/arc.../kernel/una_asm_64.S\r" - ], - [ - 0.200503, - "105.76 MB O 47.32 MB C 47.23 MB D 
data/linux-4.1.8/arch/tile/kernel/reboot.c \r" - ], - [ - 0.201177, - "107.64 MB O 48.27 MB C 48.18 MB D data/linux-4.1.8/arc...t/compressed/eboot.c\r" - ], - [ - 0.200192, - "109.82 MB O 49.22 MB C 49.13 MB D data/linux-4.1.8/arc...clude/asm/spinlock.h\r" - ], - [ - 0.200851, - "112.71 MB O 50.56 MB C 50.48 MB D data/linux-4.1.8/arch/x86/kernel/ptrace.c \r" - ], - [ - 0.200195, - "115.71 MB O 51.96 MB C 51.87 MB D data/linux-4.1.8/arc...s/platform_emc1403.c\r" - ], - [ - 0.200306, - "117.28 MB O 52.79 MB C 52.70 MB D data/linux-4.1.8/arc...nclude/variant/tie.h\r" - ], - [ - 0.204475, - "122.68 MB O 55.35 MB C 55.26 MB D data/linux-4.1.8/fir...x-e1-6.2.9.0.fw.ihex\r" - ], - [ - 0.199974, - "127.39 MB O 58.15 MB C 57.97 MB D data/linux-4.1.8/fs/afs/fsclient.c \r" - ], - [ - 0.201254, - "132.58 MB O 60.42 MB C 60.24 MB D data/linux-4.1.8/fs/cifs/cifssmb.c \r" - ], - [ - 0.216710, - "136.76 MB O 62.28 MB C 62.10 MB D data/linux-4.1.8/fs/ext4/inline.c \r" - ], - [ - 0.200891, - "140.78 MB O 64.15 MB C 63.97 MB D data/linux-4.1.8/fs/jbd2/commit.c \r" - ], - [ - 0.199883, - "144.88 MB O 65.98 MB C 65.80 MB D data/linux-4.1.8/fs/nfs/objlayout \r" - ], - [ - 0.201488, - "150.31 MB O 67.96 MB C 67.78 MB D data/linux-4.1.8/fs/...fy/dnotify/dnotify.c\r" - ], - [ - 0.205472, - "154.72 MB O 69.97 MB C 69.79 MB D data/linux-4.1.8/fs/quota/dquot.c \r" - ], - [ - 0.200493, - "159.06 MB O 71.91 MB C 71.73 MB D data/linux-4.1.8/fs/...xfs/xfs_inode_fork.h\r" - ], - [ - 0.200000, - "161.54 MB O 73.09 MB C 72.91 MB D data/linux-4.1.8/inc.../crypto/public_key.h\r" - ], - [ - 0.205041, - "164.32 MB O 74.28 MB C 74.09 MB D data/linux-4.1.8/inc...inux/cgroup_subsys.h\r" - ], - [ - 0.200371, - "166.33 MB O 75.23 MB C 75.05 MB D data/linux-4.1.8/include/linux/if_team.h \r" - ], - [ - 0.200340, - "168.82 MB O 76.24 MB C 76.06 MB D data/linux-4.1.8/inc.../mfd/pcf50633/gpio.h\r" - ], - [ - 0.200162, - "171.65 MB O 77.36 MB C 77.17 MB D data/linux-4.1.8/include/linux/phy.h \r" - ], - [ - 
0.200385, - "172.84 MB O 77.97 MB C 77.79 MB D data/linux-4.1.8/include/linux/scc.h \r" - ], - [ - 0.200918, - "174.87 MB O 78.94 MB C 78.76 MB D data/linux-4.1.8/include/linux/wait.h \r" - ], - [ - 0.200117, - "177.06 MB O 80.01 MB C 79.83 MB D data/linux-4.1.8/inc...er/nfnetlink_queue.h\r" - ], - [ - 0.200254, - "179.53 MB O 81.13 MB C 80.95 MB D data/linux-4.1.8/inc...e/events/intel-sst.h\r" - ], - [ - 0.200176, - "181.40 MB O 82.05 MB C 81.86 MB D data/linux-4.1.8/include/uapi/linux/mpls.h \r" - ], - [ - 0.200438, - "183.11 MB O 82.88 MB C 82.70 MB D data/linux-4.1.8/inc...api/scsi/fc/fc_els.h\r" - ], - [ - 0.200226, - "186.12 MB O 84.31 MB C 84.12 MB D data/linux-4.1.8/kernel/jump_label.c \r" - ], - [ - 0.200138, - "190.76 MB O 86.46 MB C 86.28 MB D data/linux-4.1.8/lib/Kconfig.debug \r" - ], - [ - 0.200958, - "194.21 MB O 87.82 MB C 87.64 MB D data/linux-4.1.8/mm/memblock.c \r" - ], - [ - 0.200544, - "198.19 MB O 89.69 MB C 89.51 MB D data/linux-4.1.8/net/bluetooth/ecc.c \r" - ], - [ - 0.200232, - "202.28 MB O 91.52 MB C 91.34 MB D data/linux-4.1.8/net/hsr/hsr_slave.c \r" - ], - [ - 0.200153, - "206.23 MB O 93.40 MB C 93.22 MB D data/linux-4.1.8/net/ipx/af_ipx.c \r" - ], - [ - 0.200526, - "210.30 MB O 95.08 MB C 94.89 MB D data/linux-4.1.8/net...ter/ipvs/ip_vs_ftp.c\r" - ], - [ - 0.200433, - "213.29 MB O 96.37 MB C 96.19 MB D data/linux-4.1.8/net/phonet/af_phonet.c \r" - ], - [ - 0.200669, - "217.21 MB O 98.21 MB C 98.03 MB D data/linux-4.1.8/net.../svc_rdma_recvfrom.c\r" - ], - [ - 0.200014, - "220.20 MB O 99.53 MB C 99.35 MB D data/linux-4.1.8/scr...e/free/iounmap.cocci\r" - ], - [ - 0.200446, - "222.94 MB O 100.82 MB C 100.64 MB D data/linux-4.1.8/security/selinux/Makefile \r" - ], - [ - 0.214711, - "226.41 MB O 102.23 MB C 102.05 MB D data/linux-4.1.8/sou...seq/seq_midi_event.c\r" - ], - [ - 0.202631, - "228.96 MB O 103.31 MB C 103.13 MB D data/linux-4.1.8/sound/mips/ad1843.c \r" - ], - [ - 0.200095, - "232.28 MB O 104.65 MB C 104.47 MB D 
data/linux-4.1.8/sound/pci/ctxfi/Makefile \r" - ], - [ - 0.200726, - "236.33 MB O 106.24 MB C 106.06 MB D data/linux-4.1.8/sound/pci/nm256/Makefile \r" - ], - [ - 0.199902, - "239.73 MB O 107.58 MB C 107.40 MB D data/linux-4.1.8/sou.../codecs/cs4271-spi.c\r" - ], - [ - 0.200592, - "244.29 MB O 109.08 MB C 108.90 MB D data/linux-4.1.8/sound/soc/codecs/wm8940.c \r" - ], - [ - 0.200357, - "247.98 MB O 110.35 MB C 110.17 MB D data/linux-4.1.8/sou...oc/omap/omap-mcpdm.c\r" - ], - [ - 0.200901, - "250.64 MB O 111.50 MB C 111.32 MB D data/linux-4.1.8/sound/usb/mixer_scarlett.c\r" - ], - [ - 0.200535, - "252.14 MB O 112.20 MB C 112.01 MB D data/linux-4.1.8/tools/perf/builtin-kvm.c \r" - ], - [ - 0.200239, - "254.11 MB O 113.07 MB C 112.88 MB D data/linux-4.1.8/tools/perf/util/record.c \r" - ], - [ - 0.200233, - "255.70 MB O 113.82 MB C 113.64 MB D data/linux-4.1.8/too...re/bin/configinit.sh\r" - ], - [ - 0.395702, - " \r------------------------------------------------------------------------------\r\nArchive name: backup1\r\nArchive fingerprint: b3104802be9faa610f281619c69e4d3e672df2ce97528a35d83f15080d02ed86\r\nStart time: Sat Oct 24 22:27:24 2015\r\nEnd time: Sat Oct 24 22:27:43 2015\r\nDuration: 19.32 seconds\r\nNumber of files: 31557\r\n\r\n Original size Compressed size Deduplicated size\r\nThis archive: 257.06 MB 114.44 MB 114.26 MB\r\nAll archives: 257.06 MB 114.44 MB 114.26 MB\r\n\r\n Unique chunks Total chunks\r\nChunk index: 33731 34030\r\n------------------------------------------------------------------------------\r\n" - ], - [ - 0.046138, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 1.000000, - "#" - ], - [ - 0.564664, - " " - ], - [ - 0.313339, - "c" - ], - [ - 0.492152, - "h" - ], - [ - 0.479518, - "a" - ], - [ - 0.536708, - "n" - ], - [ - 0.134006, - "g" - ], - [ - 0.147326, - "e" - ], - [ - 0.068957, - " " - ], - [ - 0.179678, - "s" - ], - [ - 0.096249, - "o" - ], - [ - 0.081003, - "m" - ], - [ - 0.124342, - "e" - ], - [ - 
0.117830, - " " - ], - [ - 0.138019, - "d" - ], - [ - 0.137898, - "a" - ], - [ - 0.199628, - "t" - ], - [ - 0.104935, - "a" - ], - [ - 0.150868, - " " - ], - [ - 0.144877, - "s" - ], - [ - 0.126816, - "l" - ], - [ - 0.178466, - "i" - ], - [ - 0.113395, - "g" - ], - [ - 0.101022, - "h" - ], - [ - 0.102395, - "t" - ], - [ - 0.311498, - "l" - ], - [ - 0.366608, - "y" - ], - [ - 0.657991, - ":" - ], - [ - 0.423140, - "\r\n" - ], - [ - 0.000708, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 2.000000, - "e" - ], - [ - 0.000021, - "c" - ], - [ - 0.000024, - "h" - ], - [ - 0.000029, - "o" - ], - [ - 0.000018, - " " - ], - [ - 0.000025, - "\"" - ], - [ - 0.000025, - "s" - ], - [ - 0.000026, - "o" - ], - [ - 0.000070, - "me " - ], - [ - 0.000022, - "m" - ], - [ - 0.000028, - "o" - ], - [ - 0.000027, - "r" - ], - [ - 0.000026, - "e" - ], - [ - 0.000029, - " " - ], - [ - 0.000028, - "d" - ], - [ - 0.000028, - "a" - ], - [ - 0.000028, - "t" - ], - [ - 0.000026, - "a" - ], - [ - 0.000033, - "\"" - ], - [ - 0.000028, - " " - ], - [ - 0.000059, - "\u003e" - ], - [ - 0.000045, - " " - ], - [ - 0.000020, - "d" - ], - [ - 0.000040, - "a" - ], - [ - 0.000035, - "t" - ], - [ - 0.000039, - "a" - ], - [ - 0.000034, - "/" - ], - [ - 0.000034, - "o" - ], - [ - 0.000034, - "n" - ], - [ - 0.000037, - "e" - ], - [ - 0.000036, - "_" - ], - [ - 0.000037, - "f" - ], - [ - 0.000037, - "i" - ], - [ - 0.000717, - "le_more\r\n" - ], - [ - 0.000181, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 2.000000, - "#" - ], - [ - 0.266289, - " " - ], - [ - 0.194686, - "n" - ], - [ - 0.157296, - "o" - ], - [ - 0.084026, - "w" - ], - [ - 0.092729, - " " - ], - [ - 0.148154, - "c" - ], - [ - 0.169136, - "r" - ], - [ - 0.214327, - "e" - ], - [ - 0.180678, - "a" - ], - [ - 0.161652, - "t" - ], - [ - 0.128260, - "e" - ], - [ - 0.158131, - " " - ], - [ - 0.118838, - "a" - ], - [ - 0.120885, - " " - ], - [ - 0.797511, - "s" - ], - [ - 0.200585, - "e" - ], - [ - 
0.171811, - "c" - ], - [ - 0.106721, - "o" - ], - [ - 0.153298, - "n" - ], - [ - 0.052244, - "d" - ], - [ - 0.149675, - " " - ], - [ - 0.183517, - "b" - ], - [ - 0.076768, - "a" - ], - [ - 0.189428, - "c" - ], - [ - 0.088431, - "k" - ], - [ - 0.229617, - "u" - ], - [ - 0.272021, - "p" - ], - [ - 0.965855, - ":" - ], - [ - 0.674517, - "\r\n" - ], - [ - 0.000322, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.946131, - "b" - ], - [ - 0.111159, - "o" - ], - [ - 0.094622, - "r" - ], - [ - 0.085288, - "g" - ], - [ - 0.165429, - " " - ], - [ - 0.936087, - "c" - ], - [ - 0.192608, - "r" - ], - [ - 0.187511, - "e" - ], - [ - 0.173135, - "a" - ], - [ - 0.179441, - "t" - ], - [ - 0.125923, - "e" - ], - [ - 0.164920, - " " - ], - [ - 0.737259, - "-" - ], - [ - 0.185417, - "-" - ], - [ - 0.233405, - "s" - ], - [ - 0.152945, - "t" - ], - [ - 0.181548, - "a" - ], - [ - 0.330237, - "t" - ], - [ - 0.735524, - "s" - ], - [ - 0.179019, - " " - ], - [ - 0.245324, - "-" - ], - [ - 0.142362, - "-" - ], - [ - 0.233989, - "p" - ], - [ - 0.153782, - "r" - ], - [ - 0.064431, - "o" - ], - [ - 0.104827, - "g" - ], - [ - 0.090533, - "r" - ], - [ - 0.168129, - "e" - ], - [ - 0.206325, - "s" - ], - [ - 0.157551, - "s" - ], - [ - 0.383630, - " " - ], - [ - 0.759364, - "r" - ], - [ - 0.199262, - "e" - ], - [ - 0.139781, - "p" - ], - [ - 0.151367, - "o" - ], - [ - 0.720350, - ":" - ], - [ - 0.144801, - ":" - ], - [ - 0.532566, - "b" - ], - [ - 0.226514, - "a" - ], - [ - 0.209449, - "c" - ], - [ - 0.142062, - "k" - ], - [ - 0.300090, - "u" - ], - [ - 0.262794, - "p" - ], - [ - 0.218785, - "2" - ], - [ - 0.249599, - " " - ], - [ - 0.187125, - "d" - ], - [ - 0.157741, - "a" - ], - [ - 0.175739, - "t" - ], - [ - 0.139896, - "a" - ], - [ - 0.795560, - "\r\n" - ], - [ - 0.571808, - " 6.50 MB O 3.09 MB C 0 B D data/linux-4.1.8/Doc...ngs/arm/armadeus.txt\r" - ], - [ - 0.200103, - " 11.82 MB O 6.17 MB C 0 B D data/linux-4.1.8/Documentation/hwmon/w83795\r" - ], - [ - 0.200121, - " 
27.38 MB O 13.89 MB C 0 B D data/linux-4.1.8/arc...ot/dts/nspire-cx.dts\r" - ], - [ - 0.200110, - " 39.92 MB O 19.04 MB C 0 B D data/linux-4.1.8/arc...omap2/opp2430_data.c\r" - ], - [ - 0.200088, - " 52.28 MB O 24.23 MB C 0 B D data/linux-4.1.8/arc...fin/mach-bf561/smp.c\r" - ], - [ - 0.200078, - " 67.02 MB O 29.94 MB C 0 B D data/linux-4.1.8/arc...8k/include/asm/pci.h\r" - ], - [ - 0.200116, - " 78.29 MB O 34.52 MB C 0 B D data/linux-4.1.8/arc...etlogic/xlr/wakeup.c\r" - ], - [ - 0.200081, - " 90.07 MB O 40.11 MB C 0 B D data/linux-4.1.8/arc...eature-fixups-test.S\r" - ], - [ - 0.200092, - "101.15 MB O 45.19 MB C 0 B D data/linux-4.1.8/arc...rc/crypto/sha1_asm.S\r" - ], - [ - 0.200078, - "115.05 MB O 51.63 MB C 0 B D data/linux-4.1.8/arc...6/mm/kasan_init_64.c\r" - ], - [ - 0.200062, - "147.39 MB O 66.98 MB C 0 B D data/linux-4.1.8/fs/nls/nls_cp864.c \r" - ], - [ - 0.200117, - "169.16 MB O 76.38 MB C 0 B D data/linux-4.1.8/inc.../mfd/twl4030-audio.h\r" - ], - [ - 0.200074, - "181.43 MB O 82.06 MB C 0 B D data/linux-4.1.8/include/uapi/linux/mtio.h \r" - ], - [ - 0.200131, - "209.10 MB O 94.58 MB C 0 B D data/linux-4.1.8/net/mac80211/scan.c \r" - ], - [ - 0.200079, - "234.87 MB O 105.68 MB C 0 B D data/linux-4.1.8/sou...i/hda/patch_si3054.c\r" - ], - [ - 0.200110, - "255.66 MB O 113.80 MB C 0 B D data/linux-4.1.8/too...ves/asm/asm-compat.h\r" - ], - [ - 0.201350, - " \r------------------------------------------------------------------------------\r\nArchive name: backup2\r\nArchive fingerprint: 5737afe8ad2cda7667973b7f2e1d83f097ef3117b5753a38ba7664b616fbdc5a\r\nStart time: Sat Oct 24 22:28:24 2015\r\nEnd time: Sat Oct 24 22:28:27 2015\r\nDuration: 3.41 seconds\r\nNumber of files: 31557\r\n" - ], - [ - 0.001858, - "\r\n Original size Compressed size Deduplicated size\r\nThis archive: 257.06 MB 114.47 MB 45.19 kB\r\nAll archives: 514.12 MB 228.92 MB 114.31 MB\r\n\r\n Unique chunks Total chunks\r\nChunk index: 33733 
68060\r\n------------------------------------------------------------------------------\r\n" - ], - [ - 0.033369, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 1.013315, - "#" - ], - [ - 0.482095, - " " - ], - [ - 0.319571, - "w" - ], - [ - 0.140740, - "o" - ], - [ - 0.090380, - "w" - ], - [ - 0.304400, - "," - ], - [ - 0.137310, - " " - ], - [ - 0.662280, - "t" - ], - [ - 0.162678, - "h" - ], - [ - 0.114083, - "a" - ], - [ - 0.077660, - "t" - ], - [ - 0.120839, - " " - ], - [ - 0.207626, - "w" - ], - [ - 0.195480, - "a" - ], - [ - 0.060188, - "s" - ], - [ - 0.149129, - " " - ], - [ - 0.094522, - "a" - ], - [ - 0.098801, - " " - ], - [ - 0.266235, - "l" - ], - [ - 0.184774, - "o" - ], - [ - 0.255040, - "t" - ], - [ - 0.170498, - " " - ], - [ - 0.201599, - "f" - ], - [ - 0.189774, - "a" - ], - [ - 0.229140, - "s" - ], - [ - 0.275243, - "t" - ], - [ - 0.177347, - "e" - ], - [ - 0.090806, - "r" - ], - [ - 0.204494, - "!" - ], - [ - 0.479851, - "\r\n" - ], - [ - 0.000316, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.734961, - "#" - ], - [ - 0.300000, - " " - ], - [ - 0.300000, - "n" - ], - [ - 0.199297, - "o" - ], - [ - 0.148047, - "t" - ], - [ - 0.071101, - "i" - ], - [ - 0.185554, - "c" - ], - [ - 0.395933, - "e" - ], - [ - 0.180285, - " " - ], - [ - 0.199321, - "t" - ], - [ - 0.094767, - "h" - ], - [ - 0.166966, - "a" - ], - [ - 0.102814, - "t" - ], - [ - 0.415016, - " " - ], - [ - 0.286089, - "\"" - ], - [ - 0.795323, - "D" - ], - [ - 0.180152, - "e" - ], - [ - 0.311214, - "d" - ], - [ - 0.214812, - "u" - ], - [ - 0.251616, - "p" - ], - [ - 0.203533, - "l" - ], - [ - 0.187084, - "i" - ], - [ - 0.124066, - "c" - ], - [ - 0.158062, - "a" - ], - [ - 0.260540, - "t" - ], - [ - 0.136405, - "e" - ], - [ - 0.278039, - "d" - ], - [ - 0.323148, - " " - ], - [ - 0.172337, - "s" - ], - [ - 0.074541, - "i" - ], - [ - 0.269245, - "z" - ], - [ - 0.123599, - "e" - ], - [ - 0.533647, - "\"" - ], - [ - 0.234738, - " " - ], - [ - 
0.150720, - "f" - ], - [ - 0.144329, - "o" - ], - [ - 0.086533, - "r" - ], - [ - 0.159717, - " " - ], - [ - 0.274291, - "\"" - ], - [ - 0.471163, - "T" - ], - [ - 0.162135, - "h" - ], - [ - 0.233501, - "i" - ], - [ - 0.134923, - "s" - ], - [ - 0.190779, - " " - ], - [ - 0.307322, - "a" - ], - [ - 0.153882, - "r" - ], - [ - 0.246471, - "c" - ], - [ - 0.110018, - "h" - ], - [ - 0.259798, - "i" - ], - [ - 0.132853, - "v" - ], - [ - 0.171373, - "e" - ], - [ - 0.560405, - "\"" - ], - [ - 0.609162, - "!" - ], - [ - 0.559020, - "\r\n" - ], - [ - 0.000296, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.941369, - "#" - ], - [ - 0.254237, - " " - ], - [ - 0.176998, - "b" - ], - [ - 0.124943, - "o" - ], - [ - 0.097140, - "r" - ], - [ - 0.057513, - "g" - ], - [ - 0.232990, - " " - ], - [ - 0.549539, - "r" - ], - [ - 0.112992, - "e" - ], - [ - 0.240733, - "c" - ], - [ - 0.164146, - "o" - ], - [ - 0.209755, - "g" - ], - [ - 0.145638, - "n" - ], - [ - 0.151826, - "i" - ], - [ - 0.471625, - "z" - ], - [ - 0.759625, - "e" - ], - [ - 0.229566, - "d" - ], - [ - 0.254596, - " " - ], - [ - 0.209452, - "t" - ], - [ - 0.088606, - "h" - ], - [ - 0.155981, - "a" - ], - [ - 0.086797, - "t" - ], - [ - 0.098574, - " " - ], - [ - 0.243290, - "m" - ], - [ - 0.120288, - "o" - ], - [ - 0.092890, - "s" - ], - [ - 0.058823, - "t" - ], - [ - 0.125344, - " " - ], - [ - 0.211464, - "f" - ], - [ - 0.086483, - "i" - ], - [ - 0.213685, - "l" - ], - [ - 0.096764, - "e" - ], - [ - 0.176075, - "s" - ], - [ - 0.122962, - " " - ], - [ - 0.174342, - "d" - ], - [ - 0.103474, - "i" - ], - [ - 0.089744, - "d" - ], - [ - 0.181539, - " " - ], - [ - 0.461771, - "n" - ], - [ - 0.219395, - "o" - ], - [ - 0.095042, - "t" - ], - [ - 0.119662, - " " - ], - [ - 0.156060, - "c" - ], - [ - 0.116988, - "h" - ], - [ - 0.118775, - "a" - ], - [ - 0.126173, - "n" - ], - [ - 0.118518, - "g" - ], - [ - 0.109977, - "e" - ], - [ - 0.167095, - " " - ], - [ - 0.208137, - "a" - ], - [ - 0.155464, - "n" - ], - [ 
- 0.074939, - "d" - ], - [ - 0.616534, - "\r\n" - ], - [ - 0.000405, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.836535, - "#" - ], - [ - 0.248630, - " " - ], - [ - 0.211525, - "d" - ], - [ - 0.171252, - "e" - ], - [ - 0.244098, - "d" - ], - [ - 0.121718, - "u" - ], - [ - 0.219002, - "p" - ], - [ - 0.197839, - "l" - ], - [ - 0.161081, - "i" - ], - [ - 0.112763, - "c" - ], - [ - 0.154565, - "a" - ], - [ - 0.230427, - "t" - ], - [ - 0.180004, - "e" - ], - [ - 0.182279, - "d" - ], - [ - 0.201281, - " " - ], - [ - 0.202485, - "t" - ], - [ - 0.078397, - "h" - ], - [ - 0.178577, - "e" - ], - [ - 0.150264, - "m" - ], - [ - 0.482274, - "." - ], - [ - 0.300000, - "\r\n" - ], - [ - 0.000265, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.287124, - "#" - ], - [ - 0.314208, - " " - ], - [ - 0.219731, - "n" - ], - [ - 0.176210, - "o" - ], - [ - 0.108529, - "w" - ], - [ - 0.224056, - "," - ], - [ - 0.210976, - " " - ], - [ - 0.190508, - "l" - ], - [ - 0.098452, - "e" - ], - [ - 0.101431, - "t" - ], - [ - 0.855722, - "'" - ], - [ - 0.220403, - "s" - ], - [ - 0.229447, - " " - ], - [ - 0.134839, - "e" - ], - [ - 0.241915, - "x" - ], - [ - 0.217004, - "t" - ], - [ - 0.183774, - "r" - ], - [ - 0.231721, - "a" - ], - [ - 0.221361, - "c" - ], - [ - 0.436221, - "t" - ], - [ - 0.097256, - " " - ], - [ - 0.163933, - "a" - ], - [ - 0.099964, - " " - ], - [ - 0.216806, - "b" - ], - [ - 0.086493, - "a" - ], - [ - 0.211732, - "c" - ], - [ - 0.139016, - "k" - ], - [ - 0.379423, - "u" - ], - [ - 0.250049, - "p" - ], - [ - 0.717916, - ":" - ], - [ - 0.307136, - "\r\n" - ], - [ - 0.000301, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.967533, - "m" - ], - [ - 0.103707, - "v" - ], - [ - 0.407957, - " " - ], - [ - 0.177312, - "d" - ], - [ - 0.166158, - "a" - ], - [ - 0.242593, - "t" - ], - [ - 0.090471, - "a" - ], - [ - 0.699594, - " " - ], - [ - 0.273219, - "d" - ], - [ - 0.170371, - "a" - ], - [ - 0.169331, - "t" - ], 
- [ - 0.126739, - "a" - ], - [ - 0.288488, - "." - ], - [ - 0.305856, - "o" - ], - [ - 0.135252, - "r" - ], - [ - 0.152717, - "i" - ], - [ - 0.090343, - "g" - ], - [ - 0.312536, - "\r\n" - ], - [ - 0.002579, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.944876, - "b" - ], - [ - 0.149049, - "o" - ], - [ - 0.114834, - "r" - ], - [ - 0.074682, - "g" - ], - [ - 0.129000, - " " - ], - [ - 0.129618, - "e" - ], - [ - 0.261479, - "x" - ], - [ - 0.203937, - "t" - ], - [ - 0.196213, - "r" - ], - [ - 0.193561, - "a" - ], - [ - 0.215314, - "c" - ], - [ - 0.236817, - "t" - ], - [ - 0.188232, - " " - ], - [ - 0.177286, - "r" - ], - [ - 0.200598, - "e" - ], - [ - 0.105866, - "p" - ], - [ - 0.173864, - "o" - ], - [ - 0.388954, - ":" - ], - [ - 0.144865, - ":" - ], - [ - 0.347420, - "b" - ], - [ - 0.105814, - "a" - ], - [ - 0.198728, - "c" - ], - [ - 0.096349, - "k" - ], - [ - 0.261559, - "u" - ], - [ - 0.241998, - "p" - ], - [ - 0.240033, - "2" - ], - [ - 0.981903, - "\r\n" - ], - [ - 2.000000, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.937291, - "#" - ], - [ - 0.994897, - " " - ], - [ - 0.151752, - "c" - ], - [ - 0.096956, - "h" - ], - [ - 0.337975, - "e" - ], - [ - 0.207037, - "c" - ], - [ - 0.177028, - "k" - ], - [ - 0.740370, - " " - ], - [ - 0.330206, - "i" - ], - [ - 0.177976, - "f" - ], - [ - 0.218757, - " " - ], - [ - 0.329345, - "o" - ], - [ - 0.098735, - "r" - ], - [ - 0.098576, - "i" - ], - [ - 0.103157, - "g" - ], - [ - 0.107275, - "i" - ], - [ - 0.117332, - "n" - ], - [ - 0.194072, - "a" - ], - [ - 0.211456, - "l" - ], - [ - 0.197712, - " " - ], - [ - 0.189172, - "d" - ], - [ - 0.163930, - "a" - ], - [ - 0.188334, - "t" - ], - [ - 0.165129, - "a" - ], - [ - 0.220652, - " " - ], - [ - 0.224411, - "a" - ], - [ - 0.136137, - "n" - ], - [ - 0.155260, - "d" - ], - [ - 0.074238, - " " - ], - [ - 0.104154, - "r" - ], - [ - 0.690499, - "e" - ], - [ - 0.193678, - "s" - ], - [ - 0.165163, - "t" - ], - [ - 0.165594, - "o" - 
], - [ - 0.111779, - "r" - ], - [ - 0.135625, - "e" - ], - [ - 0.202851, - "d" - ], - [ - 0.096040, - " " - ], - [ - 0.165090, - "d" - ], - [ - 0.155594, - "a" - ], - [ - 0.220606, - "t" - ], - [ - 0.163143, - "a" - ], - [ - 0.174099, - " " - ], - [ - 0.209780, - "d" - ], - [ - 0.166062, - "i" - ], - [ - 0.084688, - "f" - ], - [ - 0.140851, - "f" - ], - [ - 0.204458, - "e" - ], - [ - 0.088661, - "r" - ], - [ - 0.334162, - "s" - ], - [ - 0.904233, - ":" - ], - [ - 0.590489, - "\r\n" - ], - [ - 0.000283, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.503183, - "d" - ], - [ - 0.082614, - "i" - ], - [ - 0.216272, - "f" - ], - [ - 0.123813, - "f" - ], - [ - 0.183603, - " " - ], - [ - 0.302144, - "-" - ], - [ - 0.150946, - "r" - ], - [ - 0.152436, - " " - ], - [ - 2.000000, - "d" - ], - [ - 0.196047, - "a" - ], - [ - 0.206372, - "t" - ], - [ - 0.146051, - "a" - ], - [ - 0.326306, - "." - ], - [ - 0.363408, - "o" - ], - [ - 0.269988, - "rig/" - ], - [ - 0.776581, - " " - ], - [ - 0.137720, - "d" - ], - [ - 0.156080, - "a" - ], - [ - 0.242275, - "\u0007" - ], - [ - 0.000020, - "ta" - ], - [ - 0.872887, - "/" - ], - [ - 0.273993, - "\r\n" - ], - [ - 2.000000, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.488581, - "#" - ], - [ - 0.234021, - " " - ], - [ - 0.380938, - "n" - ], - [ - 0.240685, - "o" - ], - [ - 0.204436, - " " - ], - [ - 0.390794, - "o" - ], - [ - 0.225563, - "u" - ], - [ - 0.167295, - "t" - ], - [ - 0.140625, - "p" - ], - [ - 0.183668, - "u" - ], - [ - 0.106161, - "t" - ], - [ - 0.132063, - " " - ], - [ - 0.204757, - "m" - ], - [ - 0.082693, - "e" - ], - [ - 0.216428, - "a" - ], - [ - 0.121584, - "n" - ], - [ - 0.127398, - "s" - ], - [ - 0.264644, - " " - ], - [ - 0.201524, - "i" - ], - [ - 0.110738, - "t" - ], - [ - 0.120653, - " " - ], - [ - 0.311187, - "d" - ], - [ - 0.119826, - "o" - ], - [ - 0.082654, - "e" - ], - [ - 0.182518, - "s" - ], - [ - 0.096372, - " " - ], - [ - 0.192821, - "n" - ], - [ - 
0.193829, - "o" - ], - [ - 0.065739, - "t" - ], - [ - 0.678808, - "." - ], - [ - 0.246797, - " " - ], - [ - 0.520369, - "f" - ], - [ - 0.058288, - "i" - ], - [ - 0.064783, - "n" - ], - [ - 0.104851, - "e" - ], - [ - 0.292910, - "." - ], - [ - 0.174086, - " " - ], - [ - 0.226556, - ":" - ], - [ - 0.249808, - ")" - ], - [ - 2.000000, - "\r\n" - ], - [ - 0.000261, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.477759, - "#" - ], - [ - 0.223699, - " " - ], - [ - 0.237979, - "l" - ], - [ - 0.152769, - "i" - ], - [ - 0.135150, - "s" - ], - [ - 0.068576, - "t" - ], - [ - 0.100516, - "i" - ], - [ - 0.078648, - "n" - ], - [ - 0.099435, - "g" - ], - [ - 0.157388, - " " - ], - [ - 0.115327, - "t" - ], - [ - 0.133738, - "h" - ], - [ - 0.135662, - "e" - ], - [ - 0.100677, - " " - ], - [ - 0.180392, - "r" - ], - [ - 0.190922, - "e" - ], - [ - 0.093920, - "p" - ], - [ - 0.173588, - "o" - ], - [ - 0.193023, - " " - ], - [ - 0.206907, - "c" - ], - [ - 0.106376, - "o" - ], - [ - 0.175291, - "n" - ], - [ - 0.080726, - "t" - ], - [ - 0.179258, - "e" - ], - [ - 0.101491, - "n" - ], - [ - 0.096807, - "t" - ], - [ - 0.211455, - "s" - ], - [ - 0.508210, - ":" - ], - [ - 0.373837, - "\r\n" - ], - [ - 0.000249, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.559782, - "b" - ], - [ - 0.116587, - "o" - ], - [ - 0.139513, - "r" - ], - [ - 0.072751, - "g" - ], - [ - 0.103968, - " " - ], - [ - 0.984928, - "l" - ], - [ - 0.173603, - "i" - ], - [ - 0.112444, - "s" - ], - [ - 0.066704, - "t" - ], - [ - 0.114771, - " " - ], - [ - 0.263745, - "r" - ], - [ - 0.113121, - "e" - ], - [ - 0.126283, - "p" - ], - [ - 0.187453, - "o" - ], - [ - 0.409044, - "\r\n" - ], - [ - 0.360675, - "backup1 Sat Oct 24 22:27:43 2015" - ], - [ - 0.000011, - "\r\n" - ], - [ - 0.000006, - "backup2 Sat Oct 24 22:28:27 2015" - ], - [ - 0.000005, - "\r\n" - ], - [ - 0.027766, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.637813, - "#" - ], - [ - 
0.257629, - " " - ], - [ - 0.231710, - "l" - ], - [ - 0.213387, - "i" - ], - [ - 0.132149, - "s" - ], - [ - 0.244957, - "t" - ], - [ - 0.180264, - "i" - ], - [ - 0.082882, - "n" - ], - [ - 0.142810, - "g" - ], - [ - 0.134815, - " " - ], - [ - 0.167455, - "s" - ], - [ - 0.114155, - "o" - ], - [ - 0.106847, - "m" - ], - [ - 0.070629, - "e" - ], - [ - 0.507340, - " " - ], - [ - 0.234237, - "b" - ], - [ - 0.070181, - "a" - ], - [ - 0.220534, - "c" - ], - [ - 0.092316, - "k" - ], - [ - 0.257003, - "u" - ], - [ - 0.233598, - "p" - ], - [ - 0.201484, - " " - ], - [ - 0.124810, - "a" - ], - [ - 0.084732, - "r" - ], - [ - 0.249719, - "c" - ], - [ - 0.119605, - "h" - ], - [ - 0.203875, - "i" - ], - [ - 0.076269, - "v" - ], - [ - 0.174299, - "e" - ], - [ - 0.109711, - " " - ], - [ - 0.238294, - "c" - ], - [ - 0.102351, - "o" - ], - [ - 0.155761, - "n" - ], - [ - 0.060278, - "t" - ], - [ - 0.179564, - "e" - ], - [ - 0.112342, - "n" - ], - [ - 0.078100, - "t" - ], - [ - 0.190203, - "s" - ], - [ - 0.865560, - " " - ], - [ - 0.297799, - "(" - ], - [ - 0.225741, - "s" - ], - [ - 0.080329, - "h" - ], - [ - 0.233668, - "o" - ], - [ - 0.127773, - "r" - ], - [ - 0.190065, - "t" - ], - [ - 0.187679, - "e" - ], - [ - 0.147219, - "n" - ], - [ - 0.064472, - "e" - ], - [ - 0.188512, - "d" - ], - [ - 0.459222, - ")" - ], - [ - 0.723165, - ":" - ], - [ - 0.645995, - "\r\n" - ], - [ - 0.000096, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.446688, - "b" - ], - [ - 0.145841, - "o" - ], - [ - 0.105605, - "r" - ], - [ - 0.088953, - "g" - ], - [ - 0.120803, - " " - ], - [ - 0.227780, - "l" - ], - [ - 0.175052, - "i" - ], - [ - 0.106579, - "s" - ], - [ - 0.058441, - "t" - ], - [ - 0.093196, - " " - ], - [ - 0.172940, - "r" - ], - [ - 0.134731, - "e" - ], - [ - 0.119062, - "p" - ], - [ - 0.183075, - "o" - ], - [ - 0.388321, - ":" - ], - [ - 0.140589, - ":" - ], - [ - 0.324109, - "b" - ], - [ - 0.058606, - "a" - ], - [ - 0.205450, - "c" - ], - [ - 0.105362, - "k" - ], - [ - 
0.235009, - "u" - ], - [ - 0.243485, - "p" - ], - [ - 0.485432, - "2" - ], - [ - 0.148177, - " " - ], - [ - 0.632383, - "|" - ], - [ - 0.389914, - " " - ], - [ - 0.174128, - "t" - ], - [ - 0.201473, - "a" - ], - [ - 0.116517, - "i" - ], - [ - 0.225072, - "l" - ], - [ - 0.699624, - "\r\n" - ], - [ - 2.000000, - "-rw-rw-r-- tw tw 5516 Jul 21 19:10 data/linux-4.1.8/virt/kvm/async_pf.c\r\n" - ], - [ - 0.000019, - "-rw-rw-r-- tw tw 1120 Jul 21 19:10 data/linux-4.1.8/virt/kvm/async_pf.h\r\n-rw-rw-r-- tw tw 4215 Jul 21 19:10 data/linux-4.1.8/virt/kvm/coalesced_mmio.c\r\n-rw-rw-r-- tw tw 915 Jul 21 19:10 data/linux-4.1.8/virt/kvm/coalesced_mmio.h\r\n-rw-rw-r-- tw tw 22879 Jul 21 19:10 data/linux-4.1.8/virt/kvm/eventfd.c\r\n-rw-rw-r-- tw tw 5563 Jul 21 19:10 data/linux-4.1.8/virt/kvm/irqchip.c\r\n-rw-rw-r-- tw tw 79385 Jul 21 19:10 data/linux-4.1.8/virt/kvm/kvm_main.c\r\n" - ], - [ - 0.000011, - "-rw-rw-r-- tw tw 6132 Jul 21 19:10 data/linux-4.1.8/virt/kvm/vfio.c\r\n-rw-rw-r-- tw tw 250 Jul 21 19:10 data/linux-4.1.8/virt/kvm/vfio.h\r\n" - ], - [ - 0.000009, - "-rw-rw-r-- tw tw 15 Oct 24 22:28 data/one_file_more\r\n" - ], - [ - 0.000389, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 1.000000, - "#" - ], - [ - 0.337351, - " " - ], - [ - 0.147170, - "e" - ], - [ - 0.235736, - "a" - ], - [ - 0.251314, - "s" - ], - [ - 0.471185, - "y" - ], - [ - 0.277723, - "," - ], - [ - 0.204225, - " " - ], - [ - 0.182231, - "i" - ], - [ - 0.174424, - "s" - ], - [ - 0.074677, - "n" - ], - [ - 0.786274, - "'" - ], - [ - 0.264836, - "t" - ], - [ - 0.330352, - " " - ], - [ - 0.266876, - "i" - ], - [ - 0.112564, - "t" - ], - [ - 0.897299, - "?" 
- ], - [ - 0.623501, - " " - ], - [ - 0.656625, - "t" - ], - [ - 0.115934, - "h" - ], - [ - 0.625213, - "a" - ], - [ - 0.588409, - "t" - ], - [ - 0.160071, - " " - ], - [ - 0.830693, - "i" - ], - [ - 0.163118, - "s" - ], - [ - 0.075663, - " " - ], - [ - 0.186138, - "a" - ], - [ - 0.109916, - "l" - ], - [ - 0.137005, - "l" - ], - [ - 0.171009, - " " - ], - [ - 0.153348, - "y" - ], - [ - 0.132919, - "o" - ], - [ - 0.568100, - "u" - ], - [ - 0.211350, - " " - ], - [ - 0.195450, - "n" - ], - [ - 0.257974, - "e" - ], - [ - 0.185529, - "e" - ], - [ - 0.265130, - "d" - ], - [ - 0.129116, - " " - ], - [ - 0.169264, - "t" - ], - [ - 0.148964, - "o" - ], - [ - 0.437043, - " " - ], - [ - 0.431197, - "k" - ], - [ - 0.219557, - "n" - ], - [ - 0.257996, - "o" - ], - [ - 0.158826, - "w" - ], - [ - 0.406870, - " " - ], - [ - 0.659664, - "f" - ], - [ - 0.130963, - "o" - ], - [ - 0.125395, - "r" - ], - [ - 0.613713, - " " - ], - [ - 0.646957, - "b" - ], - [ - 0.154695, - "a" - ], - [ - 0.259741, - "s" - ], - [ - 0.156692, - "i" - ], - [ - 0.124345, - "c" - ], - [ - 0.513209, - "\r\n" - ], - [ - 0.000296, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.965828, - "#" - ], - [ - 0.232285, - " " - ], - [ - 0.266818, - "u" - ], - [ - 0.132723, - "s" - ], - [ - 0.216208, - "a" - ], - [ - 0.206362, - "g" - ], - [ - 0.142608, - "e" - ], - [ - 2.000000, - "." 
- ], - [ - 0.238868, - " " - ], - [ - 0.302986, - "i" - ], - [ - 0.196338, - "f" - ], - [ - 0.092936, - " " - ], - [ - 0.197594, - "y" - ], - [ - 0.122297, - "o" - ], - [ - 0.175360, - "u" - ], - [ - 0.145063, - " " - ], - [ - 0.313719, - "l" - ], - [ - 0.169678, - "i" - ], - [ - 0.185628, - "k" - ], - [ - 0.120660, - "e" - ], - [ - 0.078389, - " " - ], - [ - 0.648628, - "#" - ], - [ - 0.337514, - "b" - ], - [ - 0.108598, - "o" - ], - [ - 0.123792, - "r" - ], - [ - 0.136099, - "g" - ], - [ - 0.235539, - "b" - ], - [ - 0.091671, - "a" - ], - [ - 0.208697, - "c" - ], - [ - 0.100567, - "k" - ], - [ - 0.227477, - "u" - ], - [ - 0.236900, - "p" - ], - [ - 0.302154, - "," - ], - [ - 0.207291, - " " - ], - [ - 0.205656, - "s" - ], - [ - 0.123737, - "p" - ], - [ - 0.142016, - "r" - ], - [ - 0.197260, - "e" - ], - [ - 0.197471, - "a" - ], - [ - 0.104498, - "d" - ], - [ - 0.163267, - " " - ], - [ - 0.178420, - "t" - ], - [ - 0.091669, - "h" - ], - [ - 0.107735, - "e" - ], - [ - 0.102742, - " " - ], - [ - 0.211413, - "w" - ], - [ - 0.124959, - "o" - ], - [ - 0.105787, - "r" - ], - [ - 0.231403, - "d" - ], - [ - 0.299061, - "!" - ], - [ - 2.000000, - "\r\n" - ], - [ - 0.000307, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.304768, - "#" - ], - [ - 0.229433, - " " - ], - [ - 0.647220, - "t" - ], - [ - 0.070692, - "h" - ], - [ - 0.349432, - "a" - ], - [ - 0.112924, - "n" - ], - [ - 0.207031, - "k" - ], - [ - 0.567641, - "s" - ], - [ - 0.121708, - " " - ], - [ - 0.135723, - "f" - ], - [ - 0.139102, - "o" - ], - [ - 0.060453, - "r" - ], - [ - 0.152408, - " " - ], - [ - 0.116234, - "v" - ], - [ - 0.142885, - "i" - ], - [ - 0.106596, - "e" - ], - [ - 0.231115, - "w" - ], - [ - 0.416046, - "i" - ], - [ - 0.086563, - "n" - ], - [ - 0.144009, - "g" - ], - [ - 0.725139, - "!" 
- ], - [ - 0.299810, - "\r\n" - ], - [ - 0.000250, - "\u001b]0;tw@tux: ~/borg/demo\u0007tw@tux:~/borg/demo$ " - ], - [ - 0.710767, - "exit" - ], - [ - 0.000006, - "\r\n" - ] - ] -} diff --git a/docs/misc/asciinema/install_and_basics.txt b/docs/misc/asciinema/install_and_basics.txt deleted file mode 100644 index 135db1702a..0000000000 --- a/docs/misc/asciinema/install_and_basics.txt +++ /dev/null @@ -1,51 +0,0 @@ -# borgbackup - installation and basic usage - -# I have already downloaded the binary release from github: -ls -l -# binary file + GPG signature - -# verifying whether the binary is valid: -gpg --verify borg-linux64.asc borg-linux64 - -# install it as "borg": -cp borg-linux64 ~/bin/borg - -# making it executable: -chmod +x ~/bin/borg - -# yay, installation done! let's make backups! - -# creating a repository: -borg init repo - -# creating our first backup with stuff from "data" directory: -borg create --stats --progress --compression lz4 repo::backup1 data - -# changing the data slightly: -echo "some more data" > data/one_file_more - -# creating another backup: -borg create --stats --progress repo::backup2 data - -# that was much faster! it recognized/deduplicated unchanged files. -# see the "Deduplicated size" column for "This archive"! :) - -# extracting a backup archive: -mv data data.orig -borg extract repo::backup2 - -# checking if restored data differs from original data: -diff -r data.orig data - -# no, it doesn't! :) - -# listing the repo contents: -borg list repo - -# listing the backup2 archive contents (shortened): -borg list repo::backup2 | tail - -# easy, isn't it? - -# if you like #borgbackup, spread the word! - diff --git a/docs/misc/create_chunker-params.txt b/docs/misc/create_chunker-params.txt index 3e322b6608..af602c5c59 100644 --- a/docs/misc/create_chunker-params.txt +++ b/docs/misc/create_chunker-params.txt @@ -7,6 +7,7 @@ CHUNK_MIN_EXP and CHUNK_MAX_EXP give the exponent N of the 2^N minimum and maximum chunk size. 
Required: CHUNK_MIN_EXP < CHUNK_MAX_EXP. Defaults: 19 (2^19 == 512KiB) minimum, 23 (2^23 == 8MiB) maximum. +Currently it is not supported to give more than 23 as maximum. HASH_MASK_BITS is the number of least-significant bits of the rolling hash that need to be zero to trigger a chunk cut. diff --git a/docs/misc/logging.conf b/docs/misc/logging.conf new file mode 100644 index 0000000000..be8fb9cdc5 --- /dev/null +++ b/docs/misc/logging.conf @@ -0,0 +1,23 @@ +[loggers] +keys=root + +[handlers] +keys=logfile + +[formatters] +keys=logfile + +[logger_root] +level=NOTSET +handlers=logfile + +[handler_logfile] +class=FileHandler +level=INFO +formatter=logfile +args=('borg.log', 'w') + +[formatter_logfile] +format=%(asctime)s %(levelname)s %(message)s +datefmt= +class=logging.Formatter diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 597929eac2..e347faf219 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -13,6 +13,19 @@ A step by step example .. include:: quickstart_example.rst.inc +Archives and repositories +------------------------- + +A *Borg archive* is the result of a single backup (``borg create``). An archive +stores a snapshot of the data of the files "inside" it. One can later extract or +mount an archive to restore from a backup. + +*Repositories* are filesystem directories acting as self-contained stores of archives. +Repositories can be accessed locally via path or remotely via ssh. Under the hood, +repositories contain data blocks and a manifest tracking which blocks are in each +archive. If some data hasn't changed from one backup to another, Borg can simply +reference an already uploaded data chunk (deduplication). + Important note about free space ------------------------------- @@ -22,10 +35,11 @@ a good amount of free space on the filesystem that has your backup repository repositories. See also :ref:`cache-memory-usage`. 
Borg doesn't use space reserved for root on repository disks (even when run as root), -on file systems which do not support this mechanism (e.g. XFS) we recommend to -reserve some space in Borg itself just to be safe by adjusting the -``additional_free_space`` setting in the ``[repository]`` section of a repositories -``config`` file. A good starting point is ``2G``. +on file systems which do not support this mechanism (e.g. XFS) we recommend to reserve +some space in Borg itself just to be safe by adjusting the ``additional_free_space`` +setting (a good starting point is ``2G``):: + + borg config /path/to/repo additional_free_space 2G If |project_name| runs out of disk space, it tries to free as much space as it can while aborting the current operation safely, which allows to free more space @@ -54,6 +68,73 @@ Also helpful: - Other tasks fill the disk simultaneously - Hard quotas (which may not be reflected in statvfs(2)) +Important note about permissions +-------------------------------- + +Using root likely will be required if you want to backup files of other users +or the operating system. If you only back up your own files, you neither need +nor want to use root. + +Avoid to create a mixup of users and permissions in your repository (or cache). + +This can easily happen if you run borg using different user accounts (e.g. your +non-privileged user and root) while accessing the same repo. + +Of course, a non-root user will have no permission to work with the files +created by root (or another user) and borg operations will just fail with +`Permission denied`. + +The easy way to avoid this is to always access the repo as the same user: + +For a local repository just always invoke borg as same user. + +For a remote repository: always use e.g. borg@remote_host. You can use this +from different local users, the remote user accessing the repo will always be +borg. 
+ +If you need to access a local repository from different users, you can use the +same method by using ssh to borg@localhost. + +Important note about files changing during the backup process +------------------------------------------------------------- + +Borg does not do anything about the internal consistency of the data +it backs up. It just reads and backs up each file in whatever state +that file is when Borg gets to it. On an active system, this can lead +to two kinds of inconsistency: + +- By the time Borg backs up a file, it might have changed since the backup process was initiated +- A file could change while Borg is backing it up, making the file internally inconsistent + +If you have a set of files and want to ensure that they are backed up +in a specific or consistent state, you must take steps to prevent +changes to those files during the backup process. There are a few +common techniques to achieve this. + +- Avoid running any programs that might change the files. + +- Snapshot files, filesystems, container storage volumes, or logical volumes. LVM or ZFS might be useful here. + +- Dump databases or stop the database servers. + +- Shut down virtual machines before backing up their images. + +- Shut down containers before backing up their storage volumes. + +For some systems Borg might work well enough without these +precautions. If you are simply backing up the files on a system that +isn't very active (e.g. in a typical home directory), Borg usually +works well enough without further care for consistency. Log files and +caches might not be in a perfect state, but this is rarely a problem. + +For databases, virtual machines, and containers, there are specific +techniques for backing them up that do not simply use Borg to backup +the underlying filesystem. For databases, check your database +documentation for techniques that will save the database state between +transactions. 
For virtual machines, consider running the backup on +the VM itself or mounting the filesystem while the VM is shut down. +For Docker containers, perhaps docker's "save" command can help. + Automating backups ------------------ @@ -77,6 +158,11 @@ your setup. Do not forget to test your created backups to make sure everything you need is being backed up and that the ``prune`` command is keeping and deleting the correct backups. +.. note:: + + Please see the :ref:`software` section for related tooling for automating + backups. + :: #!/bin/sh @@ -84,10 +170,8 @@ backed up and that the ``prune`` command is keeping and deleting the correct bac # Setting this, so the repo does not need to be given on the commandline: export BORG_REPO=ssh://username@example.com:2022/~/backup/main - # Setting this, so you won't be asked for your repository passphrase: + # See the section "Passphrase notes" for more infos. export BORG_PASSPHRASE='XYZl0ngandsecurepa_55_phrasea&&123' - # or this to ask an external program to supply the passphrase: - export BORG_PASSCOMMAND='pass show backup' # some helpers and error handling: info() { printf "\n%s %s\n\n" "$( date )" "$*" >&2; } @@ -107,7 +191,6 @@ backed up and that the ``prune`` command is keeping and deleting the correct bac --compression lz4 \ --exclude-caches \ --exclude '/home/*/.cache/*' \ - --exclude '/var/cache/*' \ --exclude '/var/tmp/*' \ \ ::'{hostname}-{now}' \ @@ -138,14 +221,12 @@ backed up and that the ``prune`` command is keeping and deleting the correct bac # use highest exit code as global exit code global_exit=$(( backup_exit > prune_exit ? 
backup_exit : prune_exit )) - if [ ${global_exit} -eq 1 ]; - then - info "Backup and/or Prune finished with a warning" - fi - - if [ ${global_exit} -gt 1 ]; - then - info "Backup and/or Prune finished with an error" + if [ ${global_exit} -eq 0 ]; then + info "Backup and Prune finished successfully" + elif [ ${global_exit} -eq 1 ]; then + info "Backup and/or Prune finished with warnings" + else + info "Backup and/or Prune finished with errors" fi exit ${global_exit} @@ -177,6 +258,50 @@ the sudoers(5) man page. To debug what your borg process is actually seeing, find its PID (``ps aux|grep borg``) and then look into ``/proc//environ``. +.. passphrase_notes: + +Passphrase notes +---------------- + +If you use encryption (or authentication), Borg will interactively ask you +for a passphrase to encrypt/decrypt the keyfile / repokey. + +A passphrase should be a single line of text, a trailing linefeed will be +stripped. + +For your own safety, you maybe want to avoid empty passphrases as well +extremely long passphrase (much more than 256 bits of entropy). + +Also avoid passphrases containing non-ASCII characters. +Borg is technically able to process all unicode text, but you might get into +trouble reproducing the same encoded utf-8 bytes or with keyboard layouts, +so better just avoid non-ASCII stuff. + +If you want to automate, you can alternatively supply the passphrase +directly or indirectly using some environment variables. 
+ +You can directly give a passphrase:: + + # use this passphrase (use safe permissions on the script!): + export BORG_PASSPHRASE='my super secret passphrase' + +Or ask an external program to supply the passphrase:: + + # use the "pass" password manager to get the passphrase: + export BORG_PASSCOMMAND='pass show backup' + + # use GPG to get the passphrase contained in a gpg-encrypted file: + export BORG_PASSCOMMAND='gpg --decrypt borg-passphrase.gpg' + +Or read the passphrase from an open file descriptor:: + + export BORG_PASSPHRASE_FD=42 + +Using hardware crypto devices (like Nitrokey, Yubikey and others) is not +directly supported by borg, but you can use these indirectly. +E.g. if your crypto device supports GPG and borg calls ``gpg`` via +``BORG_PASSCOMMAND``, it should just work. + .. backup_compression: Backup compression @@ -185,17 +310,32 @@ Backup compression The default is lz4 (very fast, but low compression ratio), but other methods are supported for different situations. +You can use zstd for a wide range from high speed (and relatively low +compression) using N=1 to high compression (and lower speed) using N=22. 
+ +zstd is a modern compression algorithm and might be preferable over zlib and +lzma, except if you need compatibility to older borg versions (< 1.1.4) that +did not yet offer zstd.:: + + $ borg create --compression zstd,N /path/to/repo::arch ~ + +Other options are: + If you have a fast repo storage and you want minimum CPU usage, no compression:: $ borg create --compression none /path/to/repo::arch ~ If you have a less fast repo storage and you want a bit more compression (N=0..9, -0 means no compression, 9 means high compression): :: +0 means no compression, 9 means high compression): + +:: $ borg create --compression zlib,N /path/to/repo::arch ~ If you have a very slow repo storage and you want high compression (N=0..9, 0 means -low compression, 9 means high compression): :: +low compression, 9 means high compression): + +:: $ borg create --compression lzma,N /path/to/repo::arch ~ @@ -207,32 +347,21 @@ Keep an eye on CPU load and throughput. Repository encryption --------------------- -Repository encryption can be enabled or disabled at repository creation time -(the default is enabled, with `repokey` method):: - - $ borg init --encryption=none|repokey|keyfile PATH - -When repository encryption is enabled all data is encrypted using 256-bit AES_ -encryption and the integrity and authenticity is verified using `HMAC-SHA256`_. +You can choose the repository encryption mode at repository creation time:: -All data is encrypted on the client before being written to the repository. This -means that an attacker who manages to compromise the host containing an -encrypted archive will not be able to access any of the data, even while the backup -is being made. + $ borg init --encryption=MODE PATH -|project_name| supports different methods to store the AES and HMAC keys. +For a list of available encryption MODEs and their descriptions, please refer +to :ref:`borg_init`. -``repokey`` mode - The key is stored inside the repository (in its "config" file). 
- Use this mode if you trust in your good passphrase giving you enough - protection. The repository server never sees the plaintext key. +If you use encryption, all data is encrypted on the client before being written +to the repository. +This means that an attacker who manages to compromise the host containing an +encrypted repository will not be able to access any of the data, even while the +backup is being made. -``keyfile`` mode - The key is stored on your local disk (in ``~/.config/borg/keys/``). - Use this mode if you want "passphrase and having-the-key" security. - -In both modes, the key is stored in encrypted form and can be only decrypted -by providing the correct passphrase. +Key material is stored in encrypted form and can be only decrypted by providing +the correct passphrase. For automated backups the passphrase can be specified using the `BORG_PASSPHRASE` environment variable. @@ -291,3 +420,153 @@ mounting the remote filesystem, for example, using sshfs:: You can also use other remote filesystems in a similar way. Just be careful, not all filesystems out there are really stable and working good enough to be acceptable for backup usage. + + +Restoring a backup +------------------ + +Please note that we are only describing the most basic commands and options +here - please refer to the command reference to see more. + +For restoring, you usually want to work **on the same machine as the same user** +that was also used to create the backups of the wanted files. 
Doing it like
+that avoids quite some issues:
+
+- no confusion relating to paths
+- same mapping of user/group names to user/group IDs
+- no permission issues
+- you likely already have a working borg setup there,
+
+  - maybe including an environment variable for the key passphrase (for encrypted repos),
+  - maybe including a keyfile for the repo (not needed for repokey mode),
+  - maybe including an ssh key for the repo server (not needed for locally mounted repos),
+  - maybe including a valid borg cache for that repo (quicker than cache rebuild).
+
+The **user** might be:
+
+- root (if full backups, backups including system stuff or multiple
+  users' files were made)
+- some specific user using sudo to execute borg as root
+- some specific user (if backups of that user's files were made)
+
+A borg **backup repository** can be either:
+
+- in a local directory (like e.g. a locally mounted USB disk)
+- on a remote backup server machine that is reachable via ssh (client/server)
+
+If the repository is encrypted, you will also need the **key** and the **passphrase**
+(which is protecting the key).
+
+The **key** can be located:
+
+- in the repository (**repokey** mode).
+
+  Easy, this will usually "just work".
+- in the home directory of the user who did the backup (**keyfile** mode).
+
+  This may cause a bit more effort:
+
+  - if you have just lost that home directory and you first need to restore the
+    borg key (e.g. from the separate backup you have made of it or from another
+    user or machine accessing the same repository).
+  - if you first must find out the correct machine / user / home directory
+    (where the borg client was run to make the backups).
+
+The **passphrase** for the key has been either:
+
+- entered interactively at backup time
+  (not practical if backup is automated / unattended).
+- acquired via some environment variable driven mechanism in the backup script
+  (look there for BORG_PASSPHRASE, BORG_PASSCOMMAND, etc. and just do it like
+  that).
+
+There are **2 ways to restore** files from a borg backup repository:
+
+- **borg mount** - use this if:
+
+  - you don't precisely know what files you want to restore
+  - you don't know which archive contains the files (in the state) you want
+  - you need to look into files / directories before deciding what you want
+  - you need a relatively low volume of data restored
+  - you don't care for restoring stuff that the FUSE mount is not implementing yet
+    (like special fs flags, ACLs)
+  - you have a client with good resources (RAM, CPU, temp. disk space)
+  - you would rather use some file manager to restore (copy) files than borg
+    extract shell commands
+
+- **borg extract** - use this if:
+
+  - you precisely know what you want (repo, archive, path)
+  - you need a high volume of files restored (best speed)
+  - you want an as-complete-as-it-gets reproduction of file metadata
+    (like special fs flags, ACLs)
+  - you have a client with low resources (RAM, CPU, temp. disk space)
+
+
+Example with **borg mount**:
+
+::
+
+    # open a new, separate terminal (this terminal will be blocked until umount)
+
+    # now we find out the archive names we have in the repo:
+    borg list /mnt/backup/borg_repo
+
+    # mount one archive from a borg repo:
+    borg mount /mnt/backup/borg_repo::myserver-system-2019-08-11 /mnt/borg
+
+    # alternatively, mount all archives from a borg repo (slower):
+    borg mount /mnt/backup/borg_repo /mnt/borg
+
+    # it may take a while until you see stuff in /mnt/borg.
+
+    # now use another terminal or file browser and look into /mnt/borg.
+    # when finished, umount to unlock the repo and unblock the terminal:
+    borg umount /mnt/borg
+
+
+Example with **borg extract**:
+
+::
+
+    # borg extract always extracts into current directory and that directory
+    # should be empty (borg does not support transforming a non-empty dir to
+    # the state as present in your backup archive).
+ mkdir borg_restore + cd borg_restore + + # now we find out the archive names we have in the repo: + borg list /mnt/backup/borg_repo + + # we could find out the archive contents, esp. the path layout: + borg list /mnt/backup/borg_repo::myserver-system-2019-08-11 + + # we extract only some specific path (note: no leading / !): + borg extract /mnt/backup/borg_repo::myserver-system-2019-08-11 path/to/extract + + # alternatively, we could fully extract the archive: + borg extract /mnt/backup/borg_repo::myserver-system-2019-08-11 + + # now move the files to the correct place... + + +Difference when using a **remote borg backup server**: + +It is basically all the same as with the local repository, but you need to +refer to the repo using a ``ssh://`` URL. + +In the given example, ``borg`` is the user name used to log into the machine +``backup.example.org`` which runs ssh on port ``2222`` and has the borg repo +in ``/path/to/repo``. + +Instead of giving a FQDN or a hostname, you can also give an IP address. + +As usual, you either need a password to log in or the backup server might +have authentication set up via ssh ``authorized_keys`` (which is likely the +case if unattended, automated backups were done). + +:: + + borg mount ssh://borg@backup.example.org:2222/path/to/repo /mnt/borg + # or + borg extract ssh://borg@backup.example.org:2222/path/to/repo diff --git a/docs/resources.rst b/docs/resources.rst index 2ef0f021f7..89f3b06ae5 100644 --- a/docs/resources.rst +++ b/docs/resources.rst @@ -27,6 +27,8 @@ Some of them refer to attic, but you can do the same stuff (and more) with borgb - `Evolution of Borg (youtube) `_ +.. _software: + Software -------- diff --git a/docs/support.rst b/docs/support.rst index 293264f51e..ce80dacc61 100644 --- a/docs/support.rst +++ b/docs/support.rst @@ -1,62 +1,13 @@ -.. include:: global.rst.inc .. 
_support: Support ======= -Please first read the docs, the existing issue tracker issues and mailing -list posts -- a lot of stuff is already documented / explained / discussed / -filed there. +Support and Services +-------------------- -Issue Tracker -------------- +Please see https://www.borgbackup.org/ for free and paid support and service options. -If you've found a bug or have a concrete feature request, please create a new -ticket on the project's `issue tracker`_. - -For more general questions or discussions, IRC or mailing list are preferred. - -Chat (IRC) ----------- -Join us on channel #borgbackup on chat.freenode.net. - -As usual on IRC, just ask or tell directly and then patiently wait for replies. -Stay connected. - -You could use the following link (after connecting, you can change the random -nickname you get by typing "/nick mydesirednickname"): - -http://webchat.freenode.net/?randomnick=1&channels=%23borgbackup&uio=MTY9dHJ1ZSY5PXRydWUa8 - -.. _mailing_list: - -Mailing list ------------- - -To find out about the mailing list, its topic, how to subscribe, how to -unsubscribe and where you can find the archives of the list, see the -`mailing list homepage -`_. - -Bounties and Fundraisers ------------------------- - -We use `BountySource `_ to allow -monetary contributions to the project and the developers, who push it forward. - -There, you can give general funds to the borgbackup members (the developers will -then spend the funds as they deem fit). If you do not have some specific bounty -(see below), you can use this as a general way to say "Thank You!" and support -the software / project you like. - -If you want to encourage developers to fix some specific issue or implement some -specific feature suggestion, you can post a new bounty or back an existing one -(they always refer to an issue in our `issue tracker`_). 
- -As a developer, you can become a Bounty Hunter and win bounties (earn money) by -contributing to |project_name|, a free and open source software project. - -We might also use BountySource to fund raise for some bigger goals. .. _security-contact: @@ -73,5 +24,31 @@ GPG Key Fingerprint: 6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393 The public key can be fetched from any GPG keyserver, but be careful: you must use the **full fingerprint** to check that you got the correct key. -`Releases `_ are signed with this GPG key, -please use GPG to verify their authenticity. +Verifying signed releases +------------------------- + +`Releases `_ are signed with the same GPG key and a .asc file is provided for each binary. + +To verify a signature, the public key needs to be known to GPG. It can be imported into the local keystore from a keyserver with the fingerprint:: + + gpg --recv-keys "6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393" + +If GPG successfully imported the key, the output should be (among other things): 'Total number processed: 1'. + +To verify for example the signature of the borg-linux64 binary:: + + gpg --verify borg-linux64.asc + +GPG outputs if it finds a good signature. The output should look similar to this:: + + gpg: Signature made Sat 30 Dec 2017 01:07:36 PM CET using RSA key ID 51F78E01 + gpg: Good signature from "Thomas Waldmann " + gpg: aka "Thomas Waldmann " + gpg: aka "Thomas Waldmann " + gpg: aka "Thomas Waldmann " + gpg: WARNING: This key is not certified with a trusted signature! + gpg: There is no indication that the signature belongs to the owner. + Primary key fingerprint: 6D5B EF9A DD20 7580 5747 B70F 9F88 FB52 FAF7 B393 + Subkey fingerprint: 2F81 AFFB AB04 E11F E8EE 65D4 243A CFA9 51F7 8E01 + +If you want to make absolutely sure that you have the right key, you need to verify it via another channel and assign a trust-level to it. 
diff --git a/docs/usage.rst b/docs/usage.rst index de335c1c0f..1a0bae185a 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -17,7 +17,10 @@ Usage // Redirect to general docs if(hash == "") { - window.location.pathname = window.location.pathname.replace("usage.html", "usage/general.html"); + var replaced = window.location.pathname.replace("usage.html", "usage/general.html"); + if (replaced != window.location.pathname) { + window.location.pathname = replaced; + } } // Fixup anchored links from when usage.html contained all the commands else if(hash.startsWith("borg-key") || hash == "borg-change-passphrase") { @@ -49,6 +52,7 @@ Usage usage/recreate usage/tar usage/serve + usage/config usage/lock usage/benchmark diff --git a/docs/usage/benchmark_crud.rst.inc b/docs/usage/benchmark_crud.rst.inc index b76c091dc6..3e67d7b6b7 100644 --- a/docs/usage/benchmark_crud.rst.inc +++ b/docs/usage/benchmark_crud.rst.inc @@ -6,23 +6,23 @@ borg benchmark crud ------------------- .. code-block:: none - borg [common options] benchmark crud [options] REPO PATH + borg [common options] benchmark crud [options] REPOSITORY PATH .. only:: html .. class:: borg-options-table - +-------------------------------------------------------+----------+------------------------------------------+ - | **positional arguments** | - +-------------------------------------------------------+----------+------------------------------------------+ - | | ``REPO`` | repo to use for benchmark (must exist) | - +-------------------------------------------------------+----------+------------------------------------------+ - | | ``PATH`` | path were to create benchmark input data | - +-------------------------------------------------------+----------+------------------------------------------+ - | .. 
class:: borg-common-opt-ref | - | | - | :ref:`common_options` | - +-------------------------------------------------------+----------+------------------------------------------+ + +-------------------------------------------------------+----------------+----------------------------------------------+ + | **positional arguments** | + +-------------------------------------------------------+----------------+----------------------------------------------+ + | | ``REPOSITORY`` | repository to use for benchmark (must exist) | + +-------------------------------------------------------+----------------+----------------------------------------------+ + | | ``PATH`` | path were to create benchmark input data | + +-------------------------------------------------------+----------------+----------------------------------------------+ + | .. class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-------------------------------------------------------+----------------+----------------------------------------------+ .. raw:: html @@ -34,8 +34,8 @@ borg benchmark crud .. only:: latex - REPO - repo to use for benchmark (must exist) + REPOSITORY + repository to use for benchmark (must exist) PATH path were to create benchmark input data @@ -50,13 +50,13 @@ This command benchmarks borg CRUD (create, read, update, delete) operations. It creates input data below the given PATH and backups this data into the given REPO. The REPO must already exist (it could be a fresh empty repo or an existing repo, the -command will create / read / update / delete some archives named borg-test-data\* there. +command will create / read / update / delete some archives named borg-benchmark-crud\* there. Make sure you have free space there, you'll need about 1GB each (+ overhead). 
-If your repository is encrypted and borg needs a passphrase to unlock the key, use: +If your repository is encrypted and borg needs a passphrase to unlock the key, use:: -BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH + BORG_PASSPHRASE=mysecret borg benchmark crud REPO PATH Measurements are done with different input file sizes and counts. The file contents are very artificial (either all zero or all random), @@ -70,7 +70,7 @@ C- == borg create (1st archive creation, no compression, do not use files cache) R- == borg extract (extract archive, dry-run, do everything, but do not write files to disk) R-Z- == all zero files. Measuring heavily duplicated files. R-R- == random files. No duplication here, measuring throughput through all processing - stages, except writing to disk. + stages, except writing to disk. U- == borg create (2nd archive creation of unchanged input files, measure files cache speed) The throughput value is kind of virtual here, it does not actually read the file. diff --git a/docs/usage/borgfs.rst b/docs/usage/borgfs.rst new file mode 100644 index 0000000000..3652549308 --- /dev/null +++ b/docs/usage/borgfs.rst @@ -0,0 +1,3 @@ +:orphan: + +.. include:: borgfs.rst.inc diff --git a/docs/usage/borgfs.rst.inc b/docs/usage/borgfs.rst.inc new file mode 100644 index 0000000000..3488e936bf --- /dev/null +++ b/docs/usage/borgfs.rst.inc @@ -0,0 +1,138 @@ +.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! + +.. _borg_borgfs: + +borg borgfs +----------- +.. code-block:: none + + borg [common options] borgfs [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] + +.. only:: html + + .. 
class:: borg-options-table + + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **positional arguments** | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``REPOSITORY_OR_ARCHIVE`` | repository/archive to mount | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``MOUNTPOINT`` | where to mount filesystem | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``PATH`` | paths to extract; patterns are supported | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **optional arguments** | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-V``, ``--version`` | show version number and exit | + 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-f``, ``--foreground`` | stay in foreground, do not daemonize | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-o`` | Extra mount options | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | .. class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Archive filters** — Archive filters can be applied to repository targets. | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-P PREFIX``, ``--prefix PREFIX`` | only consider archive names starting with this prefix. 
| + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-a GLOB``, ``--glob-archives GLOB`` | only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--sort-by KEYS`` | Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--first N`` | consider first N archives after other filters were applied | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--last N`` | consider last N archives after other filters were applied | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Exclusion options** | + 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--strip-components NUMBER`` | Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. 
| + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + + .. raw:: html + + + +.. only:: latex + + REPOSITORY_OR_ARCHIVE + repository/archive to mount + MOUNTPOINT + where to mount filesystem + PATH + paths to extract; patterns are supported + + + optional arguments + -V, --version show version number and exit + -f, --foreground stay in foreground, do not daemonize + -o Extra mount options + + + :ref:`common_options` + | + + Archive filters + -P PREFIX, --prefix PREFIX only consider archive names starting with this prefix. + -a GLOB, --glob-archives GLOB only consider archive names matching the glob. sh: rules apply, see "borg help patterns". ``--prefix`` and ``--glob-archives`` are mutually exclusive. + --sort-by KEYS Comma-separated list of sorting keys; valid keys are: timestamp, name, id; default is: timestamp + --first N consider first N archives after other filters were applied + --last N consider last N archives after other filters were applied + + + Exclusion options + -e PATTERN, --exclude PATTERN exclude paths matching PATTERN + --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line + --pattern PATTERN experimental: include/exclude paths matching PATTERN + --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line + --strip-components NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. + + +Description +~~~~~~~~~~~ + +This command mounts an archive as a FUSE filesystem. This can be useful for +browsing an archive or restoring individual files. Unless the ``--foreground`` +option is given the command will run in the background until the filesystem +is ``umounted``. 
+ +The command ``borgfs`` provides a wrapper for ``borg mount``. This can also be +used in fstab entries: +``/path/to/repo /mnt/point fuse.borgfs defaults,noauto 0 0`` + +To allow a regular user to use fstab entries, add the ``user`` option: +``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0`` + +For mount options, see the fuse(8) manual page. Additional mount options +supported by borg: + +- versions: when used with a repository mount, this gives a merged, versioned + view of the files in the archives. EXPERIMENTAL, layout may change in future. +- allow_damaged_files: by default damaged files (where missing chunks were + replaced with runs of zeros by borg check ``--repair``) are not readable and + return EIO (I/O error). Set this option to read such files. + +The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users +to tweak the performance. It sets the number of cached data chunks; additional +memory usage can be up to ~8 MiB times this number. The default is the number +of CPU cores. + +When the daemonized process receives a signal or crashes, it does not unmount. +Unmounting in these cases could cause an active rsync or similar process +to unintentionally delete data. + +When running in the foreground ^C/SIGINT unmounts cleanly, but other +signals or crashes do not. \ No newline at end of file diff --git a/docs/usage/change-passphrase.rst.inc b/docs/usage/change-passphrase.rst.inc index 1114d35039..44edba19ba 100644 --- a/docs/usage/change-passphrase.rst.inc +++ b/docs/usage/change-passphrase.rst.inc @@ -43,4 +43,9 @@ Description ~~~~~~~~~~~ The key files used for repository encryption are optionally passphrase -protected. This command can be used to change this passphrase. \ No newline at end of file +protected. This command can be used to change this passphrase. + +Please note that this command only changes the passphrase, but not any +secret protected by it (like e.g. encryption/MAC keys or chunker seed). 
+Thus, changing the passphrase after passphrase and borg key got compromised +does not protect future (nor past) backups to the same repository. \ No newline at end of file diff --git a/docs/usage/check.rst.inc b/docs/usage/check.rst.inc index cc61a76eab..e9669bbe9c 100644 --- a/docs/usage/check.rst.inc +++ b/docs/usage/check.rst.inc @@ -84,46 +84,46 @@ Description The check command verifies the consistency of a repository and the corresponding archives. +check --repair is a potentially dangerous function and might lead to data loss +(for kinds of corruption it is not capable of dealing with). BE VERY CAREFUL! + First, the underlying repository data files are checked: -- For all segments the segment magic (header) is checked -- For all objects stored in the segments, all metadata (e.g. crc and size) and +- For all segments, the segment magic header is checked. +- For all objects stored in the segments, all metadata (e.g. CRC and size) and all data is read. The read data is checked by size and CRC. Bit rot and other types of accidental damage can be detected this way. -- If we are in repair mode and a integrity error is detected for a segment, - we try to recover as many objects from the segment as possible. -- In repair mode, it makes sure that the index is consistent with the data - stored in the segments. -- If you use a remote repo server via ssh:, the repo check is executed on the - repo server without causing significant network traffic. +- In repair mode, if an integrity error is detected in a segment, try to recover + as many objects from the segment as possible. +- In repair mode, make sure that the index is consistent with the data stored in + the segments. +- If checking a remote repo via ``ssh:``, the repo check is executed on the server + without causing significant network traffic. - The repository check can be skipped using the ``--archives-only`` option. 
Second, the consistency and correctness of the archive metadata is verified: - Is the repo manifest present? If not, it is rebuilt from archive metadata chunks (this requires reading and decrypting of all metadata and data). -- Check if archive metadata chunk is present. if not, remove archive from - manifest. +- Check if archive metadata chunk is present; if not, remove archive from manifest. - For all files (items) in the archive, for all chunks referenced by these - files, check if chunk is present. - If a chunk is not present and we are in repair mode, replace it with a same-size - replacement chunk of zeros. - If a previously lost chunk reappears (e.g. via a later backup) and we are in - repair mode, the all-zero replacement chunk will be replaced by the correct chunk. - This requires reading of archive and file metadata, but not data. -- If we are in repair mode and we checked all the archives: delete orphaned - chunks from the repo. -- if you use a remote repo server via ssh:, the archive check is executed on - the client machine (because if encryption is enabled, the checks will require - decryption and this is always done client-side, because key access will be - required). -- The archive checks can be time consuming, they can be skipped using the + files, check if chunk is present. In repair mode, if a chunk is not present, + replace it with a same-size replacement chunk of zeroes. If a previously lost + chunk reappears (e.g. via a later backup), in repair mode the all-zero replacement + chunk will be replaced by the correct chunk. This requires reading of archive and + file metadata, but not data. +- In repair mode, when all the archives were checked, orphaned chunks are deleted + from the repo. One cause of orphaned chunks are input file related errors (like + read errors) in the archive creation process. 
+- If checking a remote repo via ``ssh:``, the archive check is executed on the + client machine because it requires decryption, and this is always done client-side + as key access is needed. +- The archive checks can be time consuming; they can be skipped using the ``--repository-only`` option. The ``--verify-data`` option will perform a full integrity verification (as opposed to checking the CRC32 of the segment) of data, which means reading the data from the repository, decrypting and decompressing it. This is a cryptographic verification, which will detect (accidental) corruption. For encrypted repositories it is -tamper-resistant as well, unless the attacker has access to the keys. - -It is also very slow. \ No newline at end of file +tamper-resistant as well, unless the attacker has access to the keys. It is also very +slow. \ No newline at end of file diff --git a/docs/usage/common-options.rst.inc b/docs/usage/common-options.rst.inc index 093041c47c..41fededf14 100644 --- a/docs/usage/common-options.rst.inc +++ b/docs/usage/common-options.rst.inc @@ -8,11 +8,12 @@ -p, --progress show progress information --log-json Output one JSON object per log line instead of formatted text. --lock-wait SECONDS wait at most SECONDS for acquiring a repository/cache lock (default: 1). +--bypass-lock Bypass locking mechanism --show-version show/log the borg version --show-rc show/log the return code (rc) ---no-files-cache do not load/update the file metadata cache used to detect unchanged files --umask M set umask to M (local and remote, default: 0077) --remote-path PATH use PATH as borg executable on the remote (default: "borg") --remote-ratelimit RATE set remote network upload rate limit in kiByte/s (default: 0=unlimited) --consider-part-files treat part files like normal files (e.g. to list/extract them) --debug-profile FILE Write execution profile in Borg format into FILE. For local use a Python-compatible file can be generated by suffixing FILE with ".pyprof". 
+--rsh RSH Use this command to connect to the 'borg serve' process (default: 'ssh') diff --git a/docs/usage/config.rst b/docs/usage/config.rst new file mode 100644 index 0000000000..dc21eb39da --- /dev/null +++ b/docs/usage/config.rst @@ -0,0 +1,22 @@ +.. include:: config.rst.inc + +.. note:: + + The repository & cache config files are some of the only directly manipulable + parts of a repository that aren't versioned or backed up, so be careful when + making changes\! + +Examples +~~~~~~~~ +:: + + # find cache directory + $ cd ~/.cache/borg/$(borg config /path/to/repo id) + + # reserve some space + $ borg config /path/to/repo additional_free_space 2G + + # make a repo append-only + $ borg config /path/to/repo append_only 1 + + diff --git a/docs/usage/config.rst.inc b/docs/usage/config.rst.inc new file mode 100644 index 0000000000..d90c327ee7 --- /dev/null +++ b/docs/usage/config.rst.inc @@ -0,0 +1,79 @@ +.. IMPORTANT: this file is auto-generated from borg's built-in help, do not edit! + +.. _borg_config: + +borg config +----------- +.. code-block:: none + + borg [common options] config [options] [REPOSITORY] [NAME] [VALUE] + +.. only:: html + + .. 
class:: borg-options-table + + +-------------------------------------------------------+----------------------+----------------------------------------+ + | **positional arguments** | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | | ``REPOSITORY`` | repository to configure | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | | ``NAME`` | name of config key | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | | ``VALUE`` | new value for key | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | **optional arguments** | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | | ``-c``, ``--cache`` | get and set values from the repo cache | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | | ``-d``, ``--delete`` | delete the key from the config file | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | | ``-l``, ``--list`` | list the configuration of the repo | + +-------------------------------------------------------+----------------------+----------------------------------------+ + | .. class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-------------------------------------------------------+----------------------+----------------------------------------+ + + .. raw:: html + + + +.. 
only:: latex

+
+    REPOSITORY
+        repository to configure
+    NAME
+        name of config key
+    VALUE
+        new value for key
+
+
+    optional arguments
+        -c, --cache     get and set values from the repo cache
+        -d, --delete    delete the key from the config file
+        -l, --list      list the configuration of the repo
+
+
+    :ref:`common_options`
+        |
+
+Description
+~~~~~~~~~~~
+
+This command gets and sets options in a local repository or cache config file.
+For security reasons, this command only works on local repositories.
+
+To delete a config value entirely, use ``--delete``. To list the values
+of the configuration file or the default values, use ``--list``. To get an existing
+key, pass only the key name. To set a key, pass both the key name and
+the new value. Keys can be specified in the format "section.name" or
+simply "name"; the section will default to "repository" and "cache" for
+the repo and cache configs, respectively.
+
+
+By default, borg config manipulates the repository config file. Using ``--cache``
+edits the repository cache's config file instead. 
\ No newline at end of file diff --git a/docs/usage/create.rst b/docs/usage/create.rst index 8e4a2e0361..c2505a2474 100644 --- a/docs/usage/create.rst +++ b/docs/usage/create.rst @@ -23,7 +23,7 @@ Examples # Backup the root filesystem into an archive named "root-YYYY-MM-DD" # use zlib compression (good, but slow) - default is lz4 (fast, low compression ratio) - $ borg create -C zlib,6 /path/to/repo::root-{now:%Y-%m-%d} / --one-file-system + $ borg create -C zlib,6 --one-file-system /path/to/repo::root-{now:%Y-%m-%d} / # Backup a remote host locally ("pull" style) using sshfs $ mkdir sshfs-mount @@ -41,21 +41,24 @@ Examples # Backup a raw device (must not be active/in use/mounted at that time) $ dd if=/dev/sdx bs=10M | borg create /path/to/repo::my-sdx - - # No compression (default) - $ borg create /path/to/repo::arch ~ + # No compression (none) + $ borg create --compression none /path/to/repo::arch ~ - # Super fast, low compression - $ borg create --compression lz4 /path/to/repo::arch ~ + # Super fast, low compression (lz4, default) + $ borg create /path/to/repo::arch ~ - # Less fast, higher compression (N = 0..9) + # Less fast, higher compression (zlib, N = 0..9) $ borg create --compression zlib,N /path/to/repo::arch ~ - # Even slower, even higher compression (N = 0..9) + # Even slower, even higher compression (lzma, N = 0..9) $ borg create --compression lzma,N /path/to/repo::arch ~ + # Only compress compressible data with lzma,N (N = 0..9) + $ borg create --compression auto,lzma,N /path/to/repo::arch ~ + # Use short hostname, user name and current time in archive name $ borg create /path/to/repo::{hostname}-{user}-{now} ~ - # Similar, use the same datetime format as borg 1.1 will have as default + # Similar, use the same datetime format that is default as of borg 1.1 $ borg create /path/to/repo::{hostname}-{user}-{now:%Y-%m-%dT%H:%M:%S} ~ # As above, but add nanoseconds $ borg create /path/to/repo::{hostname}-{user}-{now:%Y-%m-%dT%H:%M:%S.%f} ~ diff --git 
a/docs/usage/create.rst.inc b/docs/usage/create.rst.inc index c70e2a8f16..afce6c24d8 100644 --- a/docs/usage/create.rst.inc +++ b/docs/usage/create.rst.inc @@ -12,73 +12,95 @@ borg create .. class:: borg-options-table - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | **positional arguments** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``ARCHIVE`` | name of archive to create (must be also a valid directory name) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``PATH`` | paths to archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | **optional arguments** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-n``, ``--dry-run`` | do not create a backup archive | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-s``, ``--stats`` | print statistics for the created archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--list`` | output verbose list of items (files, dirs, ...) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--filter STATUSCHARS`` | only display items with the given status characters (see description) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--json`` | output stats as JSON. Implies ``--stats``. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--no-cache-sync`` | experimental: do not synchronize the cache. Implies ``--no-files-cache``. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | .. 
class:: borg-common-opt-ref | - | | - | :ref:`common_options` | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | **Exclusion options** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | **Filesystem options** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-x``, ``--one-file-system`` | stay in the same file system and do not store mount points of other file systems | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--numeric-owner`` | only store numeric user and group identifiers | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--noatime`` | do not store atime into archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--noctime`` | do not store ctime into archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--ignore-inode`` | ignore inode data in the file metadata cache used to detect unchanged files. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--read-special`` | open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. 
| - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | **Archive options** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--comment COMMENT`` | add a comment text to the archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--timestamp TIMESTAMP`` | manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). Alternatively, give a reference file/directory. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-c SECONDS``, ``--checkpoint-interval SECONDS`` | write checkpoint every SECONDS seconds (Default: 1800) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--chunker-params PARAMS`` | specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). 
default: 19,23,21,4095 | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-C COMPRESSION``, ``--compression COMPRESSION`` | select compression algorithm, see the output of the "borg help compression" command for details. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------+ + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **positional arguments** | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``ARCHIVE`` | name of archive to create (must be also a valid directory name) | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``PATH`` | paths to archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **optional arguments** | + 
+-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-n``, ``--dry-run`` | do not create a backup archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-s``, ``--stats`` | print statistics for the created archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--list`` | output verbose list of items (files, dirs, ...) | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--filter STATUSCHARS`` | only display items with the given status characters (see description) | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--json`` | output stats as JSON. Implies ``--stats``. 
| + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--no-cache-sync`` | experimental: do not synchronize the cache. Implies not using the files cache. | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--no-files-cache`` | do not load/update the file metadata cache used to detect unchanged files | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--stdin-name NAME`` | use NAME in archive for stdin data (default: "stdin") | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--stdin-user USER`` | set user USER in archive for stdin data (default: 'root') | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--stdin-group GROUP`` | set group GROUP in archive for stdin data (default: 'root') | + 
+-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--stdin-mode M`` | set mode to M in archive for stdin data (default: 0660) | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | .. class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Exclusion options** | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--pattern PATTERN`` | experimental: include/exclude paths 
matching PATTERN | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-nodump`` | exclude files flagged NODUMP | + 
+-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Filesystem options** | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-x``, ``--one-file-system`` | stay in the same file system and do not store mount points of other file systems. This might behave different from your expectations, see the docs. | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--numeric-owner`` | only store numeric user and group identifiers | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--noatime`` | do not store atime into archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--noctime`` | do not store ctime into archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | 
``--nobirthtime`` | do not store birthtime (creation date) into archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--nobsdflags`` | do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--noacls`` | do not read and store ACLs into archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--noxattrs`` | do not read and store xattrs into archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--ignore-inode`` | ignore inode data in the file metadata cache used to detect unchanged files. | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--files-cache MODE`` | operate files cache in MODE. 
default: ctime,size,inode | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--read-special`` | open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Archive options** | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--comment COMMENT`` | add a comment text to the archive | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--timestamp TIMESTAMP`` | manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). Alternatively, give a reference file/directory. 
| + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-c SECONDS``, ``--checkpoint-interval SECONDS`` | write checkpoint every SECONDS seconds (Default: 1800) | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--chunker-params PARAMS`` | specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE). default: 19,23,21,4095 | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-C COMPRESSION``, ``--compression COMPRESSION`` | select compression algorithm, see the output of the "borg help compression" command for details. | + +-------------------------------------------------------+---------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html @@ -102,7 +124,12 @@ borg create --list output verbose list of items (files, dirs, ...) --filter STATUSCHARS only display items with the given status characters (see description) --json output stats as JSON. Implies ``--stats``. - --no-cache-sync experimental: do not synchronize the cache. Implies ``--no-files-cache``. + --no-cache-sync experimental: do not synchronize the cache. Implies not using the files cache. 
+ --no-files-cache do not load/update the file metadata cache used to detect unchanged files + --stdin-name NAME use NAME in archive for stdin data (default: "stdin") + --stdin-user USER set user USER in archive for stdin data (default: 'root') + --stdin-group GROUP set group GROUP in archive for stdin data (default: 'root') + --stdin-mode M set mode to M in archive for stdin data (default: 0660) :ref:`common_options` @@ -113,17 +140,23 @@ borg create --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line - --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) + --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) --exclude-if-present NAME exclude directories that are tagged by containing a filesystem object with the given NAME --keep-exclude-tags, --keep-tag-files if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive + --exclude-nodump exclude files flagged NODUMP Filesystem options - -x, --one-file-system stay in the same file system and do not store mount points of other file systems + -x, --one-file-system stay in the same file system and do not store mount points of other file systems. This might behave different from your expectations, see the docs. --numeric-owner only store numeric user and group identifiers --noatime do not store atime into archive --noctime do not store ctime into archive + --nobirthtime do not store birthtime (creation date) into archive + --nobsdflags do not read and store bsdflags (e.g. 
NODUMP, IMMUTABLE) into archive + --noacls do not read and store ACLs into archive + --noxattrs do not read and store xattrs into archive --ignore-inode ignore inode data in the file metadata cache used to detect unchanged files. + --files-cache MODE operate files cache in MODE. default: ctime,size,inode --read-special open and read block and char device files as well as FIFOs as if they were regular files. Also follows symlinks pointing to these kinds of files. @@ -144,7 +177,8 @@ that means if relative paths are desired, the command has to be run from the cor directory. When giving '-' as path, borg will read data from standard input and create a -file 'stdin' in the created archive from that data. +file 'stdin' in the created archive from that data. See section *Reading from +stdin* below for details. The archive will consume almost no disk space for files or parts of files that have already been stored in other archives. @@ -157,13 +191,40 @@ In the archive name, you may use the following placeholders: {now}, {utcnow}, {fqdn}, {hostname}, {user} and some others. Backup speed is increased by not reprocessing files that are already part of -existing archives and weren't modified. Normally, detecting file modifications -will take inode information into consideration. This is problematic for files -located on sshfs and similar network file systems which do not provide stable -inode numbers, such files will always be considered modified. The -``--ignore-inode`` flag can be used to prevent this and improve performance. -This flag will reduce reliability of change detection however, with files -considered unmodified as long as their size and modification time are unchanged. +existing archives and weren't modified. The detection of unmodified files is +done by comparing multiple file metadata values with previous values kept in +the files cache. 
+ +This comparison can operate in different modes as given by ``--files-cache``: + +- ctime,size,inode (default) +- mtime,size,inode (default behaviour of borg versions older than 1.1.0rc4) +- ctime,size (ignore the inode number) +- mtime,size (ignore the inode number) +- rechunk,ctime (all files are considered modified - rechunk, cache ctime) +- rechunk,mtime (all files are considered modified - rechunk, cache mtime) +- disabled (disable the files cache, all files considered modified - rechunk) + +inode number: better safety, but often unstable on network filesystems + +Normally, detecting file modifications will take inode information into +consideration to improve the reliability of file change detection. +This is problematic for files located on sshfs and similar network file +systems which do not provide stable inode numbers, such files will always +be considered modified. You can use modes without `inode` in this case to +improve performance, but reliability of change detection might be reduced. + +ctime vs. mtime: safety vs. speed + +- ctime is a rather safe way to detect changes to a file (metadata and contents) + as it can not be set from userspace. But, a metadata-only change will already + update the ctime, so there might be some unnecessary chunking/hashing even + without content changes. Some filesystems do not support ctime (change time). + E.g. doing a chown or chmod to a file will change its ctime. +- mtime usually works and only updates if file contents were changed. But mtime + can be arbitrarily set from userspace, e.g. to set mtime back to the same value + it had before a content change happened. This can be used maliciously as well as + well-meant, but in both cases mtime based cache modes can be problematic. The mount points of filesystems or filesystem snapshots should be the same for every creation of a new archive to ensure fast operation. 
This is because the file cache that @@ -174,7 +235,14 @@ The ``--progress`` option shows (from left to right) Original, Compressed and De (O, C and D, respectively), then the Number of files (N) processed so far, followed by the currently processed path. +When using ``--stats``, you will get some statistics about how much data was +added - the "This Archive" deduplicated size there is most interesting as that is +how much your repository will grow. Please note that the "All archives" stats refer to +the state after creation. Also, the ``--stats`` and ``--dry-run`` options are mutually +exclusive because the data is not actually compressed and deduplicated during a dry run. + See the output of the "borg help patterns" command for more help on exclude patterns. + See the output of the "borg help placeholders" command for more help on placeholders. .. man NOTES @@ -193,6 +261,20 @@ only include the objects specified by ``--exclude-if-present`` in your backup, and not include any other contents of the containing folder, this can be enabled through using the ``--keep-exclude-tags`` option. +The ``-x`` or ``--one-file-system`` option excludes directories, that are mountpoints (and everything in them). +It detects mountpoints by comparing the device number from the output of ``stat()`` of the directory and its +parent directory. Specifically, it excludes directories for which ``stat()`` reports a device number different +from the device number of their parent. Be aware that in Linux (and possibly elsewhere) there are directories +with device number different from their parent, which the kernel does not consider a mountpoint and also the +other way around. Examples are bind mounts (possibly same device number, but always a mountpoint) and ALL +subvolumes of a btrfs (different device number from parent but not necessarily a mountpoint). Therefore when +using ``--one-file-system``, one should make doubly sure that the backup works as intended especially when using +btrfs. 
This is even more important, if the btrfs layout was created by someone else, e.g. a distribution +installer. + + +.. _list_item_flags: + Item flags ++++++++++ @@ -230,4 +312,23 @@ Other flags used include: - 'i' = backup data was read from standard input (stdin) - '-' = dry run, item was *not* backed up - 'x' = excluded, item was *not* backed up -- '?' = missing status code (if you see this, please file a bug report!) \ No newline at end of file +- '?' = missing status code (if you see this, please file a bug report!) + +Reading from stdin +++++++++++++++++++ + +To read from stdin, specify ``-`` as path and pipe directly to borg:: + + backup-vm --id myvm --stdout | borg create REPO::ARCHIVE - + +Note that piping to borg creates an archive even if the command piping +to borg exits with a failure. In this case, **one can end up with +truncated output being backed up**. + +Reading from stdin yields just a stream of data without file metadata +associated with it, and the files cache is not needed at all. So it is +safe to disable it via ``--no-files-cache`` and speed up backup +creation a bit. + +By default, the content read from stdin is stored in a file called 'stdin'. +Use ``--stdin-name`` to change the name. \ No newline at end of file diff --git a/docs/usage/delete.rst.inc b/docs/usage/delete.rst.inc index e965b830f6..1e789bc291 100644 --- a/docs/usage/delete.rst.inc +++ b/docs/usage/delete.rst.inc @@ -6,7 +6,7 @@ borg delete ----------- .. code-block:: none - borg [common options] delete [options] [TARGET] + borg [common options] delete [options] [REPOSITORY_OR_ARCHIVE] [ARCHIVE...] .. 
only:: html @@ -15,10 +15,14 @@ borg delete +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``TARGET`` | archive or repository to delete | + | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to delete | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``ARCHIVE`` | archives to delete | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-n``, ``--dry-run`` | do not change repository | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-s``, ``--stats`` | print 
statistics for the deleted archive | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--cache-only`` | delete only the local cache for the given repository | @@ -54,11 +58,14 @@ borg delete .. only:: latex - TARGET - archive or repository to delete + REPOSITORY_OR_ARCHIVE + repository or archive to delete + ARCHIVE + archives to delete optional arguments + -n, --dry-run do not change repository -s, --stats print statistics for the deleted archive --cache-only delete only the local cache for the given repository --force force deletion of corrupted archives, use ``--force --force`` in case ``--force`` does not work. @@ -81,4 +88,9 @@ Description This command deletes an archive from the repository or the complete repository. Disk space is reclaimed accordingly. If you delete the complete repository, the -local cache for it (if any) is also deleted. \ No newline at end of file +local cache for it (if any) is also deleted. + +When using ``--stats``, you will get some statistics about how much data was +deleted - the "Deleted data" deduplicated size there is most interesting as +that is how much your repository will shrink. +Please note that the "All archives" stats refer to the state after deletion. \ No newline at end of file diff --git a/docs/usage/diff.rst b/docs/usage/diff.rst index e297244360..d9542a737a 100644 --- a/docs/usage/diff.rst +++ b/docs/usage/diff.rst @@ -16,6 +16,7 @@ Examples $ echo "something" >> file2 $ borg create ../testrepo::archive2 . + $ echo "testing 123" >> file1 $ rm file3 $ touch file4 $ borg create ../testrepo::archive3 . 
@@ -26,11 +27,18 @@ Examples +135 B -252 B file2 $ borg diff testrepo::archive2 archive3 + +17 B -5 B file1 added 0 B file4 removed 0 B file3 $ borg diff testrepo::archive1 archive3 - [-rw-r--r-- -> -rwxr-xr-x] file1 + +17 B -5 B [-rw-r--r-- -> -rwxr-xr-x] file1 +135 B -252 B file2 added 0 B file4 removed 0 B file3 + + $ borg diff --json-lines testrepo::archive1 archive3 + {"path": "file1", "changes": [{"type": "modified", "added": 17, "removed": 5}, {"type": "mode", "old_mode": "-rw-r--r--", "new_mode": "-rwxr-xr-x"}]} + {"path": "file2", "changes": [{"type": "modified", "added": 135, "removed": 252}]} + {"path": "file4", "changes": [{"type": "added", "size": 0}]} + {"path": "file3", "changes": [{"type": "removed", "size": 0}]} \ No newline at end of file diff --git a/docs/usage/diff.rst.inc b/docs/usage/diff.rst.inc index ef5b831a60..c048874436 100644 --- a/docs/usage/diff.rst.inc +++ b/docs/usage/diff.rst.inc @@ -6,49 +6,45 @@ borg diff --------- .. code-block:: none - borg [common options] diff [options] REPO_ARCHIVE1 ARCHIVE2 [PATH...] + borg [common options] diff [options] REPO::ARCHIVE1 ARCHIVE2 [PATH...] .. only:: html .. 
class:: borg-options-table - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | **positional arguments** | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``REPO_ARCHIVE1`` | repository location and ARCHIVE1 name | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``ARCHIVE2`` | ARCHIVE2 name (no repository location allowed) | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``PATH`` | paths of items inside the archives to compare; patterns are supported | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | **optional arguments** | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--numeric-owner`` | only consider numeric user and group identifiers | - 
+-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--same-chunker-params`` | Override check of chunker parameters. | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--sort`` | Sort the output lines by file path. | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | .. class:: borg-common-opt-ref | - | | - | :ref:`common_options` | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | **Exclusion options** | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | - 
+-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | - +-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ - | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | - 
+-------------------------------------------------------+-----------------------------------------------+---------------------------------------------------------------------------------------------------------------------------+ + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | **positional arguments** | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``REPO::ARCHIVE1`` | repository location and ARCHIVE1 name | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``ARCHIVE2`` | ARCHIVE2 name (no repository location allowed) | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``PATH`` | paths of items inside the archives to compare; patterns are supported | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | **optional arguments** | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--numeric-owner`` | only consider numeric user and group identifiers | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--same-chunker-params`` | Override check of chunker parameters. 
| + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--sort`` | Sort the output lines by file path. | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--json-lines`` | Format output as JSON Lines. | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | .. class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | **Exclusion options** | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ + | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per 
line | + +-------------------------------------------------------+---------------------------------------+----------------------------------------------------------------------------+ .. raw:: html @@ -60,7 +56,7 @@ borg diff .. only:: latex - REPO_ARCHIVE1 + REPO::ARCHIVE1 repository location and ARCHIVE1 name ARCHIVE2 ARCHIVE2 name (no repository location allowed) @@ -72,6 +68,7 @@ borg diff --numeric-owner only consider numeric user and group identifiers --same-chunker-params Override check of chunker parameters. --sort Sort the output lines by file path. + --json-lines Format output as JSON Lines. :ref:`common_options` @@ -82,9 +79,6 @@ borg diff --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line - --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) - --exclude-if-present NAME exclude directories that are tagged by containing a filesystem object with the given NAME - --keep-exclude-tags, --keep-tag-files if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive Description @@ -92,7 +86,7 @@ Description This command finds differences (file contents, user/group/mode) between archives. -A repository location and an archive name must be specified for REPO_ARCHIVE1. +A repository location and an archive name must be specified for REPO::ARCHIVE1. ARCHIVE2 is just another archive name in same repository (no repository location allowed). 
diff --git a/docs/usage/extract.rst.inc b/docs/usage/extract.rst.inc index ef9a0c75c9..141f68d086 100644 --- a/docs/usage/extract.rst.inc +++ b/docs/usage/extract.rst.inc @@ -27,6 +27,12 @@ borg extract +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--numeric-owner`` | only obey numeric user and group identifiers | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ + | | ``--nobsdflags`` | do not extract/set bsdflags (e.g. NODUMP, IMMUTABLE) | + +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ + | | ``--noacls`` | do not extract/set ACLs | + +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ + | | ``--noxattrs`` | do not extract/set xattrs | + +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--stdout`` | write all extracted data to stdout | +-------------------------------------------------------+---------------------------------------+-----------------------------------------------------------------------------------------------------------+ | | ``--sparse`` | create holes in output sparse file from all-zero chunks | @@ -68,6 +74,9 @@ borg extract --list output verbose list of items (files, dirs, ...) 
-n, --dry-run do not actually change any files --numeric-owner only obey numeric user and group identifiers + --nobsdflags do not extract/set bsdflags (e.g. NODUMP, IMMUTABLE) + --noacls do not extract/set ACLs + --noxattrs do not extract/set xattrs --stdout write all extracted data to stdout --sparse create holes in output sparse file from all-zero chunks diff --git a/docs/usage/general.rst b/docs/usage/general.rst index 127ab66762..55146498f1 100644 --- a/docs/usage/general.rst +++ b/docs/usage/general.rst @@ -16,7 +16,7 @@ of values (e.g. ``--encryption`` of :ref:`borg_init`). Experimental features are not stable, which means that they may be changed in incompatible ways or even removed entirely without prior notice in following releases. -.. include:: ../usage_general.rst.inc +.. include:: usage_general.rst.inc In case you are interested in more details (like formulas), please see :ref:`internals`. For details on the available JSON output, refer to @@ -30,3 +30,25 @@ Common options All Borg commands share these options: .. include:: common-options.rst.inc + +Option ``--bypass-lock`` allows you to access the repository while bypassing +borg's locking mechanism. This is necessary if your repository is on a read-only +storage where you don't have write permissions or capabilities and therefore +cannot create a lock. Examples are repositories stored on a Bluray disc or a +read-only network storage. Avoid this option if you are able to use locks as +that is the safer way; see the warning below. + +.. warning:: + + If you do use ``--bypass-lock``, you are responsible to ensure that no other + borg instances have write access to the repository. Otherwise, you might + experience errors and read broken data if changes to that repository are + being made at the same time. 
+ +Examples +~~~~~~~~ +:: + + # Create an archive and log: borg version, files list, return code + $ borg create --show-version --list --show-rc /path/to/repo::my-files files + diff --git a/docs/usage/general/date-time.rst.inc b/docs/usage/general/date-time.rst.inc new file mode 100644 index 0000000000..4926eb3bd0 --- /dev/null +++ b/docs/usage/general/date-time.rst.inc @@ -0,0 +1,10 @@ +Date and Time +~~~~~~~~~~~~~ + +We format date and time conforming to ISO-8601, that is: YYYY-MM-DD and +HH:MM:SS (24h clock). + +For more information about that, see: https://xkcd.com/1179/ + +Unless otherwise noted, we display local date and time. +Internally, we store and process date and time as UTC. diff --git a/docs/usage/general/environment.rst.inc b/docs/usage/general/environment.rst.inc new file mode 100644 index 0000000000..7eb73c9d73 --- /dev/null +++ b/docs/usage/general/environment.rst.inc @@ -0,0 +1,151 @@ +Environment Variables +~~~~~~~~~~~~~~~~~~~~~ + +Borg uses some environment variables for automation: + +General: + BORG_REPO + When set, use the value to give the default repository location. If a command needs an archive + parameter, you can abbreviate as ``::archive``. If a command needs a repository parameter, you + can either leave it away or abbreviate as ``::``, if a positional parameter is required. + BORG_PASSPHRASE + When set, use the value to answer the passphrase question for encrypted repositories. + It is used when a passphrase is needed to access an encrypted repo as well as when a new + passphrase should be initially set when initializing an encrypted repo. + See also BORG_NEW_PASSPHRASE. + BORG_PASSCOMMAND + When set, use the standard output of the command (trailing newlines are stripped) to answer the + passphrase question for encrypted repositories. + It is used when a passphrase is needed to access an encrypted repo as well as when a new + passphrase should be initially set when initializing an encrypted repo. 
Note that the command + is executed without a shell. So variables like ``$HOME`` will work, but ``~`` won't. + If BORG_PASSPHRASE is also set, it takes precedence. + See also BORG_NEW_PASSPHRASE. + BORG_PASSPHRASE_FD + When set, specifies a file descriptor to read a passphrase + from. Programs starting borg may choose to open an anonymous pipe + and use it to pass a passphrase. This is safer than passing via + BORG_PASSPHRASE, because on some systems (e.g. Linux) environment + can be examined by other processes. + If BORG_PASSPHRASE or BORG_PASSCOMMAND are also set, they take precedence. + BORG_NEW_PASSPHRASE + When set, use the value to answer the passphrase question when a **new** passphrase is asked for. + This variable is checked first. If it is not set, BORG_PASSPHRASE and BORG_PASSCOMMAND will also + be checked. + Main use case for this is to fully automate ``borg change-passphrase``. + BORG_DISPLAY_PASSPHRASE + When set, use the value to answer the "display the passphrase for verification" question when defining a new passphrase for encrypted repositories. + BORG_HOSTNAME_IS_UNIQUE=no + Borg assumes that it can derive a unique hostname / identity (see ``borg debug info``). + If this is not the case or you do not want Borg to automatically remove stale locks, + set this to *no*. + BORG_HOST_ID + Borg usually computes a host id from the FQDN plus the results of ``uuid.getnode()`` (which usually returns + a unique id based on the MAC address of the network interface). Except if that MAC happens to be all-zero - in + that case it returns a random value, which is not what we want (because it kills automatic stale lock removal). + So, if you have an all-zero MAC address or other reasons to better externally control the host id, just set this + environment variable to a unique value. If all your FQDNs are unique, you can just use the FQDN. If not, + use fqdn@uniqueid. + BORG_LOGGING_CONF + When set, use the given filename as INI_-style logging configuration.
+ A basic example conf can be found at ``docs/misc/logging.conf``. + BORG_RSH + When set, use this command instead of ``ssh``. This can be used to specify ssh options, such as + a custom identity file ``ssh -i /path/to/private/key``. See ``man ssh`` for other options. Using + the ``--rsh CMD`` commandline option overrides the environment variable. + BORG_REMOTE_PATH + When set, use the given path as borg executable on the remote (defaults to "borg" if unset). + Using ``--remote-path PATH`` commandline option overrides the environment variable. + BORG_FILES_CACHE_SUFFIX + When set to a value at least one character long, instructs borg to use a specifically named + (based on the suffix) alternative files cache. This can be used to avoid loading and saving + cache entries for backup sources other than the current sources. + BORG_FILES_CACHE_TTL + When set to a numeric value, this determines the maximum "time to live" for the files cache + entries (default: 20). The files cache is used to quickly determine whether a file is unchanged. + The FAQ explains this more detailed in: :ref:`always_chunking` + BORG_SHOW_SYSINFO + When set to no (default: yes), system information (like OS, Python version, ...) in + exceptions is not shown. + Please only use for good reasons as it makes issues harder to analyze. + BORG_WORKAROUNDS + A list of comma separated strings that trigger workarounds in borg, + e.g. to work around bugs in other software. + + Currently known strings are: + + basesyncfile + Use the more simple BaseSyncFile code to avoid issues with sync_file_range. + You might need this to run borg on WSL (Windows Subsystem for Linux) or + in systemd.nspawn containers on some architectures (e.g. ARM). + Using this does not affect data safety, but might result in a more bursty + write to disk behaviour (not continuously streaming to disk). 
+ +Some automatic "answerers" (if set, they automatically answer confirmation questions): + BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=no (or =yes) + For "Warning: Attempting to access a previously unknown unencrypted repository" + BORG_RELOCATED_REPO_ACCESS_IS_OK=no (or =yes) + For "Warning: The repository at location ... was previously located at ..." + BORG_CHECK_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) + For "This is a potentially dangerous function..." (check --repair) + BORG_DELETE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) + For "You requested to completely DELETE the repository *including* all archives it contains:" + + Note: answers are case sensitive. setting an invalid answer value might either give the default + answer or ask you interactively, depending on whether retries are allowed (they by default are + allowed). So please test your scripts interactively before making them a non-interactive script. + +.. _XDG env var: https://specifications.freedesktop.org/basedir-spec/0.6/ar01s03.html + +Directories and files: + BORG_BASE_DIR + Defaults to ``$HOME`` or ``~$USER`` or ``~`` (in that order). + If you want to move all borg-specific folders to a custom path at once, all you need to do is + to modify ``BORG_BASE_DIR``: the other paths for cache, config etc. will adapt accordingly + (assuming you didn't set them to a different custom value). + BORG_CACHE_DIR + Defaults to ``$BORG_BASE_DIR/.cache/borg``. If ``BORG_BASE_DIR`` is not explicitly set while + `XDG env var`_ ``XDG_CACHE_HOME`` is set, then ``$XDG_CACHE_HOME/borg`` is being used instead. + This directory contains the local cache and might need a lot + of space for dealing with big repositories. Make sure you're aware of the associated + security aspects of the cache location: :ref:`cache_security` + BORG_CONFIG_DIR + Defaults to ``$BORG_BASE_DIR/.config/borg``. If ``BORG_BASE_DIR`` is not explicitly set while + `XDG env var`_ ``XDG_CONFIG_HOME`` is set, then ``$XDG_CONFIG_HOME/borg`` is being used instead. 
+ This directory contains all borg configuration directories, see the FAQ + for a security advisory about the data in this directory: :ref:`home_config_borg` + BORG_SECURITY_DIR + Defaults to ``$BORG_CONFIG_DIR/security``. + This directory contains information borg uses to track its usage of NONCES ("numbers used + once" - usually in encryption context) and other security relevant data. + BORG_KEYS_DIR + Defaults to ``$BORG_CONFIG_DIR/keys``. + This directory contains keys for encrypted repositories. + BORG_KEY_FILE + When set, use the given filename as repository key file. + TMPDIR + This is where temporary files are stored (might need a lot of temporary space for some + operations), see tempfile_ for details. + +Building: + BORG_OPENSSL_PREFIX + Adds given OpenSSL header file directory to the default locations (setup.py). + BORG_LIBLZ4_PREFIX + Adds given prefix directory to the default locations. If a 'include/lz4.h' is found Borg + will be linked against the system liblz4 instead of a bundled implementation. (setup.py) + BORG_LIBB2_PREFIX + Adds given prefix directory to the default locations. If a 'include/blake2.h' is found Borg + will be linked against the system libb2 instead of a bundled implementation. (setup.py) + BORG_LIBZSTD_PREFIX + Adds given prefix directory to the default locations. If a 'include/zstd.h' is found Borg + will be linked against the system libzstd instead of a bundled implementation. (setup.py) + +Please note: + +- Be very careful when using the "yes" sayers, the warnings with prompt exist for your / your data's security/safety. +- Also be very careful when putting your passphrase into a script, make sure it has appropriate file permissions (e.g. + mode 600, root:root). + +.. _INI: https://docs.python.org/3/library/logging.config.html#configuration-file-format + +.. 
_tempfile: https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir diff --git a/docs/usage/general/file-metadata.rst.inc b/docs/usage/general/file-metadata.rst.inc new file mode 100644 index 0000000000..8f4c67cbfb --- /dev/null +++ b/docs/usage/general/file-metadata.rst.inc @@ -0,0 +1,61 @@ +Support for file metadata +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Besides regular file and directory structures, Borg can preserve + +* symlinks (stored as symlink, the symlink is not followed) +* special files: + + * character and block device files (restored via mknod) + * FIFOs ("named pipes") + * special file *contents* can be backed up in ``--read-special`` mode. + By default the metadata to create them with mknod(2), mkfifo(2) etc. is stored. +* hardlinked regular files, devices, FIFOs (considering all items in the same archive) +* timestamps in nanosecond precision: mtime, atime, ctime +* other timestamps: birthtime (on platforms supporting it) +* permissions: + + * IDs of owning user and owning group + * names of owning user and owning group (if the IDs can be resolved) + * Unix Mode/Permissions (u/g/o permissions, suid, sgid, sticky) + +On some platforms additional features are supported: + +.. Yes/No's are grouped by reason/mechanism/reference. 
+ ++-------------------------+----------+-----------+------------+ +| Platform | ACLs | xattr | Flags | +| | [#acls]_ | [#xattr]_ | [#flags]_ | ++=========================+==========+===========+============+ +| Linux | Yes | Yes | Yes [1]_ | ++-------------------------+----------+-----------+------------+ +| Mac OS X | Yes | Yes | Yes (all) | ++-------------------------+----------+-----------+------------+ +| FreeBSD | Yes | Yes | Yes (all) | ++-------------------------+----------+-----------+------------+ +| OpenBSD | n/a | n/a | Yes (all) | ++-------------------------+----------+-----------+------------+ +| NetBSD | n/a | No [2]_ | Yes (all) | ++-------------------------+----------+-----------+------------+ +| Solaris and derivatives | No [3]_ | No [3]_ | n/a | ++-------------------------+----------+-----------+------------+ +| Windows (cygwin) | No [4]_ | No | No | ++-------------------------+----------+-----------+------------+ + +Other Unix-like operating systems may work as well, but have not been tested at all. + +Note that most of the platform-dependent features also depend on the file system. +For example, ntfs-3g on Linux isn't able to convey NTFS ACLs. + +.. [1] Only "nodump", "immutable", "compressed" and "append" are supported. + Feature request :issue:`618` for more flags. +.. [2] Feature request :issue:`1332` +.. [3] Feature request :issue:`1337` +.. [4] Cygwin tries to map NTFS ACLs to permissions with varying degrees of success. + +.. [#acls] The native access control list mechanism of the OS. This normally limits access to + non-native ACLs. For example, NTFS ACLs aren't completely accessible on Linux with ntfs-3g. +.. [#xattr] extended attributes; key-value pairs attached to a file, mainly used by the OS. + This includes resource forks on Mac OS X. +.. [#flags] aka *BSD flags*. The Linux set of flags [1]_ is portable across platforms. + The BSDs define additional flags. 
diff --git a/docs/usage/general/file-systems.rst.inc b/docs/usage/general/file-systems.rst.inc new file mode 100644 index 0000000000..1b29969fde --- /dev/null +++ b/docs/usage/general/file-systems.rst.inc @@ -0,0 +1,28 @@ +File systems +~~~~~~~~~~~~ + +We strongly recommend against using Borg (or any other database-like +software) on non-journaling file systems like FAT, since it is not +possible to assume any consistency in case of power failures (or a +sudden disconnect of an external drive or similar failures). + +While Borg uses a data store that is resilient against these failures +when used on journaling file systems, it is not possible to guarantee +this with some hardware -- independent of the software used. We don't +know a list of affected hardware. + +If you are suspicious whether your Borg repository is still consistent +and readable after one of the failures mentioned above occurred, run +``borg check --verify-data`` to make sure it is consistent. + +.. rubric:: Requirements for Borg repository file systems + +- Long file names +- At least three directory levels with short names +- Typically, file sizes up to a few hundred MB. + Large repositories may require large files (>2 GB). +- Up to 1000 files per directory (10000 for repositories initialized with Borg 1.0) +- mkdir(2) should be atomic, since it is used for locking +- Hardlinks are needed for :ref:`borg_upgrade` (if ``--inplace`` option is not used). + Also hardlinks are used for more safe and secure file updating (e.g. of the repo + config file), but the code tries to work also if hardlinks are not supported. diff --git a/docs/usage/general/logging.rst.inc b/docs/usage/general/logging.rst.inc new file mode 100644 index 0000000000..ba1dd8f960 --- /dev/null +++ b/docs/usage/general/logging.rst.inc @@ -0,0 +1,44 @@ +Logging +~~~~~~~ + +Borg writes all log output to stderr by default. 
But please note that something +showing up on stderr does *not* indicate an error condition just because it is +on stderr. Please check the log levels of the messages and the return code of +borg for determining error, warning or success conditions. + +If you want to capture the log output to a file, just redirect it: + +:: + + borg create repo::archive myfiles 2>> logfile + + +Custom logging configurations can be implemented via BORG_LOGGING_CONF. + +The log level of the builtin logging configuration defaults to WARNING. +This is because we want Borg to be mostly silent and only output +warnings, errors and critical messages, unless output has been requested +by supplying an option that implies output (e.g. ``--list`` or ``--progress``). + +Log levels: DEBUG < INFO < WARNING < ERROR < CRITICAL + +Use ``--debug`` to set DEBUG log level - +to get debug, info, warning, error and critical level output. + +Use ``--info`` (or ``-v`` or ``--verbose``) to set INFO log level - +to get info, warning, error and critical level output. + +Use ``--warning`` (default) to set WARNING log level - +to get warning, error and critical level output. + +Use ``--error`` to set ERROR log level - +to get error and critical level output. + +Use ``--critical`` to set CRITICAL log level - +to get critical level output. + +While you can set misc. log levels, do not expect that every command will +give different output on different log levels - it's just a possibility. + +.. warning:: Options ``--critical`` and ``--error`` are provided for completeness, + their usage is not recommended as you might miss important information. 
diff --git a/docs/usage/general/positional-arguments.rst.inc b/docs/usage/general/positional-arguments.rst.inc new file mode 100644 index 0000000000..718bd08be4 --- /dev/null +++ b/docs/usage/general/positional-arguments.rst.inc @@ -0,0 +1,15 @@ +Positional Arguments and Options: Order matters +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Borg only supports taking options (``-s`` and ``--progress`` in the example) +to the left or right of all positional arguments (``repo::archive`` and ``path`` +in the example), but not in between them: + +:: + + borg create -s --progress repo::archive path # good and preferred + borg create repo::archive path -s --progress # also works + borg create -s repo::archive path --progress # works, but ugly + borg create repo::archive -s --progress path # BAD + +This is due to a problem in the argparse module: https://bugs.python.org/issue15112 diff --git a/docs/usage/general/repository-locations.rst.inc b/docs/usage/general/repository-locations.rst.inc new file mode 100644 index 0000000000..06799d7bb3 --- /dev/null +++ b/docs/usage/general/repository-locations.rst.inc @@ -0,0 +1,13 @@ +Repository / Archive Locations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Many commands want either a repository (just give the repo URL, see above) or +an archive location, which is a repo URL followed by ``::archive_name``. + +Archive names must not contain the ``/`` (slash) character. For simplicity, +maybe also avoid blanks or other characters that have special meaning on the +shell or in a filesystem (borg mount will use the archive name as directory +name). + +If you have set BORG_REPO (see above) and an archive location is needed, use +``::archive_name`` - the repo URL part is then read from BORG_REPO. 
diff --git a/docs/usage/general/repository-urls.rst.inc b/docs/usage/general/repository-urls.rst.inc new file mode 100644 index 0000000000..2855c19b24 --- /dev/null +++ b/docs/usage/general/repository-urls.rst.inc @@ -0,0 +1,54 @@ +Repository URLs +~~~~~~~~~~~~~~~ + +**Local filesystem** (or locally mounted network filesystem): + +``/path/to/repo`` - filesystem path to repo directory, absolute path + +``path/to/repo`` - filesystem path to repo directory, relative path + +Also, stuff like ``~/path/to/repo`` or ``~other/path/to/repo`` works (this is +expanded by your shell). + +Note: you may also prepend a ``file://`` to a filesystem path to get URL style. + +**Remote repositories** accessed via ssh user@host: + +``user@host:/path/to/repo`` - remote repo, absolute path + +``ssh://user@host:port/path/to/repo`` - same, alternative syntax, port can be given + + +**Remote repositories with relative paths** can be given using this syntax: + +``user@host:path/to/repo`` - path relative to current directory + +``user@host:~/path/to/repo`` - path relative to user's home directory + +``user@host:~other/path/to/repo`` - path relative to other's home directory + +Note: giving ``user@host:/./path/to/repo`` or ``user@host:/~/path/to/repo`` or +``user@host:/~other/path/to/repo`` is also supported, but not required here. 
+ + +**Remote repositories with relative paths, alternative syntax with port**: + +``ssh://user@host:port/./path/to/repo`` - path relative to current directory + +``ssh://user@host:port/~/path/to/repo`` - path relative to user's home directory + +``ssh://user@host:port/~other/path/to/repo`` - path relative to other's home directory + + +If you frequently need the same repo URL, it is a good idea to set the +``BORG_REPO`` environment variable to set a default for the repo URL: + +:: + + export BORG_REPO='ssh://user@host:port/path/to/repo' + +Then just leave away the repo URL if only a repo URL is needed and you want +to use the default - it will be read from BORG_REPO then. + +Use ``::`` syntax to give the repo URL when syntax requires giving a positional +argument for the repo (e.g. ``borg mount :: /mnt``). diff --git a/docs/usage/general/resources.rst.inc b/docs/usage/general/resources.rst.inc new file mode 100644 index 0000000000..4f55a4cd50 --- /dev/null +++ b/docs/usage/general/resources.rst.inc @@ -0,0 +1,95 @@ +Resource Usage +~~~~~~~~~~~~~~ + +Borg might use a lot of resources depending on the size of the data set it is dealing with. + +If one uses Borg in a client/server way (with a ssh: repository), +the resource usage occurs in part on the client and in another part on the +server. + +If one uses Borg as a single process (with a filesystem repo), +all the resource usage occurs in that one process, so just add up client + +server to get the approximate resource usage. + +CPU client: + - **borg create:** does chunking, hashing, compression, crypto (high CPU usage) + - **chunks cache sync:** quite heavy on CPU, doing lots of hashtable operations. + - **borg extract:** crypto, decompression (medium to high CPU usage) + - **borg check:** similar to extract, but depends on options given. 
+ - **borg prune / borg delete archive:** low to medium CPU usage + - **borg delete repo:** done on the server + + It won't go beyond 100% of 1 core as the code is currently single-threaded. + Especially higher zlib and lzma compression levels use significant amounts + of CPU cycles. Crypto might be cheap on the CPU (if hardware accelerated) or + expensive (if not). + +CPU server: + It usually doesn't need much CPU, it just deals with the key/value store + (repository) and uses the repository index for that. + + borg check: the repository check computes the checksums of all chunks + (medium CPU usage) + borg delete repo: low CPU usage + +CPU (only for client/server operation): + When using borg in a client/server way with a ssh:-type repo, the ssh + processes used for the transport layer will need some CPU on the client and + on the server due to the crypto they are doing - esp. if you are pumping + big amounts of data. + +Memory (RAM) client: + The chunks index and the files index are read into memory for performance + reasons. Might need big amounts of memory (see below). + Compression, esp. lzma compression with high levels might need substantial + amounts of memory. + +Memory (RAM) server: + The server process will load the repository index into memory. Might need + considerable amounts of memory, but less than on the client (see below). + +Chunks index (client only): + Proportional to the amount of data chunks in your repo. Lots of chunks + in your repo imply a big chunks index. + It is possible to tweak the chunker params (see create options). + +Files index (client only): + Proportional to the amount of files in your last backups. Can be switched + off (see create options), but next backup might be much slower if you do. + The speed benefit of using the files cache is proportional to file size. + +Repository index (server only): + Proportional to the amount of data chunks in your repo. Lots of chunks + in your repo imply a big repository index. 
+ It is possible to tweak the chunker params (see create options) to + influence the amount of chunks being created. + +Temporary files (client): + Reading data and metadata from a FUSE mounted repository will consume up to + the size of all deduplicated, small chunks in the repository. Big chunks + won't be locally cached. + +Temporary files (server): + A non-trivial amount of data will be stored on the remote temp directory + for each client that connects to it. For some remotes, this can fill the + default temporary directory at /tmp. This can be remediated by ensuring the + $TMPDIR, $TEMP, or $TMP environment variable is properly set for the sshd + process. + For some OSes, this can be done just by setting the correct value in the + .bashrc (or equivalent login config file for other shells), however in + other cases it may be necessary to first enable ``PermitUserEnvironment yes`` + in your ``sshd_config`` file, then add ``environment="TMPDIR=/my/big/tmpdir"`` + at the start of the public key to be used in the ``authorized_keys`` file. + +Cache files (client only): + Contains the chunks index and files index (plus a collection of single- + archive chunk indexes which might need huge amounts of disk space, + depending on archive count and size - see FAQ about how to reduce). + +Network (only for client/server operation): + If your repository is remote, all deduplicated (and optionally compressed/ + encrypted) data of course has to go over the connection (``ssh://`` repo url). + If you use a locally mounted network filesystem, additionally some copy + operations used for transaction support also go over the connection. If + you backup multiple sources to one target repository, additional traffic + happens for cache resynchronization. 
diff --git a/docs/usage/general/return-codes.rst.inc b/docs/usage/general/return-codes.rst.inc new file mode 100644 index 0000000000..68f458c4d8 --- /dev/null +++ b/docs/usage/general/return-codes.rst.inc @@ -0,0 +1,18 @@ +Return codes +~~~~~~~~~~~~ + +Borg can exit with the following return codes (rc): + +=========== ======= +Return code Meaning +=========== ======= +0 success (logged as INFO) +1 warning (operation reached its normal end, but there were warnings -- + you should check the log, logged as WARNING) +2 error (like a fatal error, a local or remote exception, the operation + did not reach its normal end, logged as ERROR) +128+N killed by signal N (e.g. 137 == kill -9) +=========== ======= + +If you use ``--show-rc``, the return code is also logged at the indicated +level as the last log entry. diff --git a/docs/usage/general/units.rst.inc b/docs/usage/general/units.rst.inc new file mode 100644 index 0000000000..3d9d1daf19 --- /dev/null +++ b/docs/usage/general/units.rst.inc @@ -0,0 +1,11 @@ +Units +~~~~~ + +To display quantities, Borg takes care of respecting the +usual conventions of scale. Disk sizes are displayed in `decimal +`_, using powers of ten (so +``kB`` means 1000 bytes). For memory usage, `binary prefixes +`_ are used, and are +indicated using the `IEC binary prefixes +`_, +using powers of two (so ``KiB`` means 1024 bytes). diff --git a/docs/usage/help.rst.inc b/docs/usage/help.rst.inc index 152e01b540..4ad79fde37 100644 --- a/docs/usage/help.rst.inc +++ b/docs/usage/help.rst.inc @@ -6,13 +6,31 @@ borg help patterns ~~~~~~~~~~~~~~~~~~ +The path/filenames used as input for the pattern matching start from the +currently active recursion root. You usually give the recursion root(s) +when invoking borg and these can be either relative or absolute paths. + +So, when you give `relative/` as root, the paths going into the matcher +will look like `relative/.../file.ext`. 
When you give `/absolute/` as +root, they will look like `/absolute/.../file.ext`. This is meant when +we talk about "full path" below. + +File paths in Borg archives are always stored normalized and relative. +This means that e.g. ``borg create /path/to/repo ../some/path`` will +store all files as `some/path/.../file.ext` and ``borg create +/path/to/repo /home/user`` will store all files as +`home/user/.../file.ext`. Therefore, always use relative paths in your +patterns when matching archive content in commands like ``extract`` or +``mount``. Starting with Borg 1.2 this behaviour will be changed to +accept both absolute and relative paths. + File patterns support these styles: fnmatch, shell, regular expressions, path prefixes and path full-matches. By default, fnmatch is used for -``--exclude`` patterns and shell-style is used for the experimental ``--pattern`` -option. +``--exclude`` patterns and shell-style is used for the experimental +``--pattern`` option. -If followed by a colon (':') the first two characters of a pattern are used as a -style selector. Explicit style selection is necessary when a +If followed by a colon (':') the first two characters of a pattern are +used as a style selector. Explicit style selection is necessary when a non-default style is desired or when the desired pattern starts with two alphanumeric characters followed by a colon (i.e. `aa:something/*`). @@ -22,11 +40,11 @@ two alphanumeric characters followed by a colon (i.e. `aa:something/*`). any number of characters, '?' matching any single character, '[...]' matching any single character specified, including ranges, and '[!...]' matching any character not specified. For the purpose of these patterns, - the path separator ('\' for Windows and '/' on other systems) is not + the path separator (backslash for Windows and '/' on other systems) is not treated specially. Wrap meta-characters in brackets for a literal match (i.e. `[?]` to match the literal character `?`). 
For a path - to match a pattern, it must completely match from start to end, or - must match from the start to just before a path separator. Except + to match a pattern, the full path must match, or it must match + from the start of the full path to just before a path separator. Except for the root path, paths will never end in the path separator when matching is attempted. Thus, if a given pattern ends in a path separator, a '\*' is appended before matching is attempted. @@ -40,23 +58,23 @@ Shell-style patterns, selector `sh:` Regular expressions, selector `re:` Regular expressions similar to those found in Perl are supported. Unlike - shell patterns regular expressions are not required to match the complete + shell patterns regular expressions are not required to match the full path and any substring match is sufficient. It is strongly recommended to anchor patterns to the start ('^'), to the end ('$') or both. Path - separators ('\' for Windows and '/' on other systems) in paths are + separators (backslash for Windows and '/' on other systems) in paths are always normalized to a forward slash ('/') before applying a pattern. The regular expression syntax is described in the `Python documentation for the re module `_. Path prefix, selector `pp:` This pattern style is useful to match whole sub-directories. The pattern - `pp:/data/bar` matches `/data/bar` and everything therein. + `pp:root/somedir` matches `root/somedir` and everything therein. Path full-match, selector `pf:` - This pattern style is useful to match whole paths. + This pattern style is (only) useful to match full paths. This is kind of a pseudo pattern as it can not have any variable or - unspecified parts - the full, precise path must be given. - `pf:/data/foo.txt` matches `/data/foo.txt` only. + unspecified parts - the full path must be given. + `pf:root/file.ext` matches `root/file.ext` only. 
Implementation note: this is implemented via very time-efficient O(1) hashtable lookups (this means you can have huge amounts of such patterns @@ -77,16 +95,19 @@ Path full-match, selector `pf:` wildcards at most. Exclusions can be passed via the command line option ``--exclude``. When used -from within a shell the patterns should be quoted to protect them from +from within a shell, the patterns should be quoted to protect them from expansion. The ``--exclude-from`` option permits loading exclusion patterns from a text file with one pattern per line. Lines empty or starting with the number sign ('#') after removing whitespace on both ends are ignored. The optional style selector prefix is also supported for patterns loaded from a file. Due to -whitespace removal paths with whitespace at the beginning or end can only be +whitespace removal, paths with whitespace at the beginning or end can only be excluded using regular expressions. +To test your exclusion patterns without performing an actual backup you can +run ``borg create --list --dry-run ...``. + Examples:: # Exclude '/home/user/file.o' but not '/home/user/file.odt': @@ -112,8 +133,10 @@ Examples:: /home/*/junk *.tmp fm:aa:something/* - re:^/home/[^/]\.tmp/ - sh:/home/*/.thumbnails + re:^home/[^/]\.tmp/ + sh:home/*/.thumbnails + # Example with spaces, no need to escape as it is processed by borg + some file with spaces.txt EOF $ borg create --exclude-from exclude.txt backup / @@ -124,10 +147,19 @@ Examples:: may specify the backup roots (starting points) and patterns for inclusion/exclusion. A root path starts with the prefix `R`, followed by a path (a plain path, not a file pattern). An include rule starts with the prefix +, an exclude rule starts - with the prefix -, both followed by a pattern. + with the prefix -, an exclude-norecurse rule starts with !, all followed by a pattern. + + .. 
note:: + + Via ``--pattern`` or ``--patterns-from`` you can define BOTH inclusion and exclusion + of files using pattern prefixes ``+`` and ``-``. With ``--exclude`` and + ``--exclude-from`` ONLY excludes are defined. + + Inclusion patterns are useful to include paths that are contained in an excluded path. The first matching pattern is used so if an include pattern matches before - an exclude pattern, the file is backed up. + an exclude pattern, the file is backed up. If an exclude-norecurse pattern matches + a directory, it won't recurse into it and won't discover any potential matches for + include rules below that directory. Note that the default pattern style for ``--pattern`` and ``--patterns-from`` is shell style (`sh:`), so those patterns behave similar to rsync include/exclude @@ -137,7 +169,16 @@ Examples:: considered first (in the order of appearance). Then patterns from ``--patterns-from`` are added. Exclusion patterns from ``--exclude-from`` files are appended last. - An example ``--patterns-from`` file could look like that:: + Examples:: + + # backup pics, but not the ones from 2018, except the good ones: + # note: using = is essential to avoid cmdline argument parsing issues. + borg create --pattern=+pics/2018/good --pattern=-pics/2018 repo::arch pics + + # use a file with patterns: + borg create --patterns-from patterns.lst repo::arch + + The patterns.lst file could look like that:: # "sh:" pattern style is the default, so the following line is not needed: P sh @@ -151,6 +192,8 @@ Examples:: + /home/susan # don't backup the other home directories - /home/* + # don't even look in /proc + ! /proc .. 
_borg_placeholders: @@ -158,8 +201,8 @@ borg help placeholders ~~~~~~~~~~~~~~~~~~~~~~ -Repository (or Archive) URLs, ``--prefix`` and ``--remote-path`` values support these -placeholders: +Repository (or Archive) URLs, ``--prefix``, ``--glob-archives``, ``--comment`` +and ``--remote-path`` values support these placeholders: {hostname} The (short) hostname of the machine. @@ -167,13 +210,16 @@ placeholders: {fqdn} The full name of the machine. +{reverse-fqdn} + The full name of the machine in reverse domain name notation. + {now} The current local date and time, by default in ISO-8601 format. - You can also supply your own `format string `_, e.g. {now:%Y-%m-%d_%H:%M:%S} + You can also supply your own `format string `_, e.g. {now:%Y-%m-%d_%H:%M:%S} {utcnow} The current UTC date and time, by default in ISO-8601 format. - You can also supply your own `format string `_, e.g. {utcnow:%Y-%m-%d_%H:%M:%S} + You can also supply your own `format string `_, e.g. {utcnow:%Y-%m-%d_%H:%M:%S} {user} The user name (or UID, if no name is available) of the user running borg. @@ -236,7 +282,13 @@ none Do not compress. lz4 - Use lz4 compression. High speed, low compression. (default) + Use lz4 compression. Very high speed, very low compression. (default) + +zstd[,L] + Use zstd ("zstandard") compression, a modern wide-range algorithm. + If you do not explicitly give the compression level L (ranging from 1 + to 22), it will use level 3. + Archives compressed with zstd are not compatible with borg < 1.1.4. zlib[,L] Use zlib ("gz") compression. Medium speed, medium compression. 
@@ -263,6 +315,8 @@ auto,C[,L] Examples:: borg create --compression lz4 REPO::ARCHIVE data + borg create --compression zstd REPO::ARCHIVE data + borg create --compression zstd,10 REPO::ARCHIVE data borg create --compression zlib REPO::ARCHIVE data borg create --compression zlib,1 REPO::ARCHIVE data borg create --compression auto,lzma,6 REPO::ARCHIVE data diff --git a/docs/usage/info.rst.inc b/docs/usage/info.rst.inc index ec5d15098d..0d6bfe9159 100644 --- a/docs/usage/info.rst.inc +++ b/docs/usage/info.rst.inc @@ -15,7 +15,7 @@ borg info +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``REPOSITORY_OR_ARCHIVE`` | archive or repository to display information about | + | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to display information about | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -49,7 +49,7 @@ borg info .. 
only:: latex REPOSITORY_OR_ARCHIVE - archive or repository to display information about + repository or archive to display information about optional arguments @@ -77,6 +77,11 @@ up to the deduplicated size of the repository ("all archives"), because the two are meaning different things: This archive / deduplicated size = amount of data stored ONLY for this archive - = unique chunks of this archive. += unique chunks of this archive. All archives / deduplicated size = amount of data stored in the repo - = all chunks in the repository. \ No newline at end of file += all chunks in the repository. + +Borg archives can only contain a limited amount of file metadata. +The size of an archive relative to this limit depends on a number of factors, +mainly the number of files, the lengths of paths and other metadata stored for files. +This is shown as *utilization of maximum supported archive size*. \ No newline at end of file diff --git a/docs/usage/init.rst b/docs/usage/init.rst index 97860a15a1..ae0fd8b423 100644 --- a/docs/usage/init.rst +++ b/docs/usage/init.rst @@ -11,7 +11,9 @@ Examples $ borg init --encryption=none /path/to/repo # Remote repository (accesses a remote borg via ssh) + # repokey: stores the (encrypted) key into /config $ borg init --encryption=repokey-blake2 user@hostname:backup - # Remote repository (store the key your home dir) + # Remote repository (accesses a remote borg via ssh) + # keyfile: stores the (encrypted) key into ~/.config/borg/keys/ $ borg init --encryption=keyfile user@hostname:backup diff --git a/docs/usage/init.rst.inc b/docs/usage/init.rst.inc index 68437e57ff..a3e1e0d730 100644 --- a/docs/usage/init.rst.inc +++ b/docs/usage/init.rst.inc @@ -12,23 +12,25 @@ borg init .. 
class:: borg-options-table - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | **positional arguments** | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | | ``REPOSITORY`` | repository to create | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | **optional arguments** | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | | ``-e MODE``, ``--encryption MODE`` | select encryption key mode **(required)** | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | | ``--append-only`` | create an append-only mode repository | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | | ``--storage-quota QUOTA`` | Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ - | .. 
class:: borg-common-opt-ref | - | | - | :ref:`common_options` | - +-------------------------------------------------------+------------------------------------+-----------------------------------------------------------------------------+ + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | **positional arguments** | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | | ``REPOSITORY`` | repository to create | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | **optional arguments** | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | | ``-e MODE``, ``--encryption MODE`` | select encryption key mode **(required)** | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | | ``--append-only`` | create an append-only mode repository | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | | ``--storage-quota QUOTA`` | Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | | ``--make-parent-dirs`` | create the parent directories of the repository directory, if they are missing. 
| + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ + | .. class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-------------------------------------------------------+------------------------------------+---------------------------------------------------------------------------------+ .. raw:: html @@ -48,6 +50,7 @@ borg init -e MODE, --encryption MODE select encryption key mode **(required)** --append-only create an append-only mode repository --storage-quota QUOTA Set storage quota of the new repository (e.g. 5G, 1.5T). Default: no quota. + --make-parent-dirs create the parent directories of the repository directory, if they are missing. :ref:`common_options` @@ -64,7 +67,11 @@ Encryption can be enabled at repository init time. It cannot be changed later. It is not recommended to work without encryption. Repository encryption protects you e.g. against the case that an attacker has access to your backup repository. -But be careful with the key / the passphrase: +Borg relies on randomly generated key material and uses that for chunking, id +generation, encryption and authentication. The key material is encrypted using +the passphrase you give before it is stored on-disk. + +You need to be careful with the key / the passphrase: If you want "passphrase-only" security, use one of the repokey modes. The key will be stored inside the repository (in its "config" file). In above @@ -102,6 +109,12 @@ the encryption/decryption key or other secrets. Encryption modes ++++++++++++++++ +You can choose from the encryption modes seen in the table below on a per-repo +basis. The mode determines encryption algorithm, hash/MAC algorithm and also the +key storage location. + +Example: `borg init --encryption repokey ...` + .. 
nanorst: inline-fill +----------+---------------+------------------------+--------------------------+ @@ -117,7 +130,8 @@ Encryption modes .. nanorst: inline-replace -`Marked modes` are new in Borg 1.1 and are not backwards-compatible with Borg 1.0.x. +Modes `marked like this` in the above table are new in Borg 1.1 and are not +backwards-compatible with Borg 1.0.x. On modern Intel/AMD CPUs (except very cheap ones), AES is usually hardware-accelerated. @@ -150,8 +164,8 @@ from the other blake2 modes. This mode is new and *not* compatible with Borg 1.0.x. `none` mode uses no encryption and no authentication. It uses SHA256 as chunk -ID hash. Not recommended, rather consider using an authenticated or -authenticated/encrypted mode. This mode has possible denial-of-service issues +ID hash. This mode is not recommended, you should rather consider using an authenticated +or authenticated/encrypted mode. This mode has possible denial-of-service issues when running ``borg create`` on contents controlled by an attacker. Use it only for new repositories where no encryption is wanted **and** when compatibility with 1.0.x is important. If compatibility with 1.0.x is not important, use diff --git a/docs/usage/key_change-passphrase.rst.inc b/docs/usage/key_change-passphrase.rst.inc index 64bc409c52..ab31b9fdeb 100644 --- a/docs/usage/key_change-passphrase.rst.inc +++ b/docs/usage/key_change-passphrase.rst.inc @@ -43,4 +43,9 @@ Description ~~~~~~~~~~~ The key files used for repository encryption are optionally passphrase -protected. This command can be used to change this passphrase. \ No newline at end of file +protected. This command can be used to change this passphrase. + +Please note that this command only changes the passphrase, but not any +secret protected by it (like e.g. encryption/MAC keys or chunker seed). +Thus, changing the passphrase after passphrase and borg key got compromised +does not protect future (nor past) backups to the same repository. 
\ No newline at end of file diff --git a/docs/usage/key_export.rst.inc b/docs/usage/key_export.rst.inc index 466ac07d9d..a295138a18 100644 --- a/docs/usage/key_export.rst.inc +++ b/docs/usage/key_export.rst.inc @@ -59,6 +59,9 @@ Description If repository encryption is used, the repository is inaccessible without the key. This command allows to backup this essential key. +Note that the backup produced does not include the passphrase itself +(i.e. the exported key stays encrypted). In order to regain access to a +repository, one needs both the exported key and the original passphrase. There are two backup formats. The normal backup format is suitable for digital storage as a file. The ``--paper`` backup format is optimized diff --git a/docs/usage/list.rst b/docs/usage/list.rst index 4268bff1ad..f0789edd2a 100644 --- a/docs/usage/list.rst +++ b/docs/usage/list.rst @@ -19,7 +19,14 @@ Examples -rwxr-xr-x root root 2140 Fri, 2015-03-27 20:24:22 bin/bzdiff ... - $ borg list /path/to/repo::archiveA --list-format="{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NEWLINE}" + $ borg list /path/to/repo::root-2016-02-15 --pattern "- bin/ba*" + drwxr-xr-x root root 0 Mon, 2016-02-15 17:44:27 . + drwxrwxr-x root root 0 Mon, 2016-02-15 19:04:49 bin + lrwxrwxrwx root root 0 Fri, 2015-03-27 20:24:26 bin/bzcmp -> bzdiff + -rwxr-xr-x root root 2140 Fri, 2015-03-27 20:24:22 bin/bzdiff + ... + + $ borg list /path/to/repo::archiveA --format="{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NEWLINE}" drwxrwxr-x user user 0 Sun, 2015-02-01 11:00:00 . 
drwxrwxr-x user user 0 Sun, 2015-02-01 11:00:00 code drwxrwxr-x user user 0 Sun, 2015-02-01 11:00:00 code/myproject diff --git a/docs/usage/list.rst.inc b/docs/usage/list.rst.inc index 3358a74bf1..02195533bf 100644 --- a/docs/usage/list.rst.inc +++ b/docs/usage/list.rst.inc @@ -15,7 +15,7 @@ borg list +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``REPOSITORY_OR_ARCHIVE`` | repository/archive to list contents of | + | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to list contents of | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``PATH`` | paths to list; patterns are supported | 
+-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -23,7 +23,7 @@ borg list +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--short`` | only print file/directory names, nothing else | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--format FORMAT``, ``--list-format FORMAT`` | specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NL}") | + | | ``--format FORMAT``, ``--list-format FORMAT`` | specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}") | 
+-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--json`` | Only valid for listing repository contents. Format output as JSON. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "barchive" key is therefore not available. | +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -55,12 +55,6 @@ borg list +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | 
+-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) | - +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | - +-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | - 
+-----------------------------------------------------------------------------+-----------------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html @@ -73,14 +67,14 @@ borg list .. only:: latex REPOSITORY_OR_ARCHIVE - repository/archive to list contents of + repository or archive to list contents of PATH paths to list; patterns are supported optional arguments --short only print file/directory names, nothing else - --format FORMAT, --list-format FORMAT specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {isomtime} {path}{extra}{NL}") + --format FORMAT, --list-format FORMAT specify format for file listing (default: "{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NL}") --json Only valid for listing repository contents. Format output as JSON. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "barchive" key is therefore not available. --json-lines Only valid for listing archive contents. Format output as JSON Lines. The form of ``--format`` is ignored, but keys used in it are added to the JSON output. Some keys are always present. Note: JSON can only represent text. A "bpath" key is therefore not available. 
@@ -101,9 +95,6 @@ borg list --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line - --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) - --exclude-if-present NAME exclude directories that are tagged by containing a filesystem object with the given NAME - --keep-exclude-tags, --keep-tag-files if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive Description @@ -127,17 +118,20 @@ The following keys are available for ``--format``: Keys for listing repository archives: -- name: archive name interpreted as text (might be missing non-text characters, see barchive) - archive: archive name interpreted as text (might be missing non-text characters, see barchive) +- name: alias of "archive" - barchive: verbatim archive name, can contain any character except NUL - comment: archive comment interpreted as text (might be missing non-text characters, see bcomment) - bcomment: verbatim archive comment, can contain any character except NUL - id: internal ID of the archive -- time: time (start) of creation of the archive - start: time (start) of creation of the archive +- time: alias of "start" - end: time (end) of creation of the archive +- hostname: hostname of host on which this archive was created +- username: username of user who created this archive + Keys for listing archive files: @@ -167,12 +161,20 @@ Keys for listing archive files: - isoctime - isoatime +- blake2b +- blake2s - md5 - sha1 - sha224 - sha256 - sha384 +- sha3_224 +- sha3_256 +- sha3_384 +- sha3_512 - sha512 +- shake_128 +- shake_256 - archiveid - archivename diff --git a/docs/usage/mount.rst b/docs/usage/mount.rst index 46c7d554c5..f170fb317f 100644 --- a/docs/usage/mount.rst +++ 
b/docs/usage/mount.rst @@ -36,6 +36,12 @@ Examples # which does not support lazy processing of archives. $ borg mount -o versions --glob-archives '*-my-home' --last 10 /path/to/repo /tmp/mymountpoint + # Exclusion options are supported. + # These can speed up mounting and lower memory needs significantly. + $ borg mount /path/to/repo /tmp/mymountpoint only/that/path + $ borg mount --exclude '...' /path/to/repo /tmp/mymountpoint + + borgfs ++++++ diff --git a/docs/usage/mount.rst.inc b/docs/usage/mount.rst.inc index deff366571..f813f55833 100644 --- a/docs/usage/mount.rst.inc +++ b/docs/usage/mount.rst.inc @@ -6,7 +6,7 @@ borg mount ---------- .. code-block:: none - borg [common options] mount [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT + borg [common options] mount [options] REPOSITORY_OR_ARCHIVE MOUNTPOINT [PATH...] .. only:: html @@ -15,10 +15,12 @@ borg mount +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **positional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``REPOSITORY_OR_ARCHIVE`` | repository/archive to mount | + | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to mount | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``MOUNTPOINT`` | where to mount filesystem | 
+-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``PATH`` | paths to extract; patterns are supported | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | **optional arguments** | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-f``, ``--foreground`` | stay in foreground, do not daemonize | @@ -41,6 +43,18 @@ borg mount +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``--last N`` | consider last N archives after other filters were applied | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Exclusion options** | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths 
matching PATTERN | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--strip-components NUMBER`` | Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. | + +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html @@ -53,9 +67,11 @@ borg mount .. 
only:: latex REPOSITORY_OR_ARCHIVE - repository/archive to mount + repository or archive to mount MOUNTPOINT where to mount filesystem + PATH + paths to extract; patterns are supported optional arguments @@ -74,6 +90,14 @@ borg mount --last N consider last N archives after other filters were applied + Exclusion options + -e PATTERN, --exclude PATTERN exclude paths matching PATTERN + --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line + --pattern PATTERN experimental: include/exclude paths matching PATTERN + --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line + --strip-components NUMBER Remove the specified number of leading path elements. Paths with fewer elements will be silently skipped. + + Description ~~~~~~~~~~~ @@ -89,14 +113,18 @@ used in fstab entries: To allow a regular user to use fstab entries, add the ``user`` option: ``/path/to/repo /mnt/point fuse.borgfs defaults,noauto,user 0 0`` -For mount options, see the fuse(8) manual page. Additional mount options -supported by borg: +For FUSE configuration and mount options, see the mount.fuse(8) manual page. + +Additional mount options supported by borg: - versions: when used with a repository mount, this gives a merged, versioned view of the files in the archives. EXPERIMENTAL, layout may change in future. - allow_damaged_files: by default damaged files (where missing chunks were replaced with runs of zeros by borg check ``--repair``) are not readable and return EIO (I/O error). Set this option to read such files. +- ignore_permissions: for security reasons the "default_permissions" mount + option is internally enforced by borg. "ignore_permissions" can be given to + not enforce "default_permissions". The BORG_MOUNT_DATA_CACHE_ENTRIES environment variable is meant for advanced users to tweak the performance. 
It sets the number of cached data chunks; additional diff --git a/docs/usage/notes.rst b/docs/usage/notes.rst index 85648382e8..3d1e39493b 100644 --- a/docs/usage/notes.rst +++ b/docs/usage/notes.rst @@ -50,6 +50,27 @@ a new repository when changing chunker params. For more details, see :ref:`chunker_details`. +``--noatime / --noctime`` +~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use these ``borg create`` options to not store the respective timestamp +into the archive, in case you do not really need it. + +Besides saving a little space for the not archived timestamp, it might also +affect metadata stream deduplication: if only this timestamp changes between +backups and is stored into the metadata stream, the metadata stream chunks +won't deduplicate just because of that. + +``--nobsdflags`` +~~~~~~~~~~~~~~~~ + +You can use this to not query and store (or not extract and set) bsdflags - +in case you don't need them or if they are broken somehow for your fs. + +On Linux, dealing with the bsdflags needs some additional syscalls. +Especially when dealing with lots of small files, this causes a noticeable +overhead, so you can use this option also for speeding up operations. + ``--umask`` ~~~~~~~~~~~ @@ -100,7 +121,7 @@ Imagine you have made some snapshots of logical volumes (LVs) you want to backup For some scenarios, this is a good method to get "crash-like" consistency (I call it crash-like because it is the same as you would get if you just - hit the reset button or your machine would abrubtly and completely crash). + hit the reset button or your machine would abruptly and completely crash). This is better than no consistency at all and a good method for some use cases, but likely not good enough if you have databases running. @@ -111,14 +132,18 @@ original volumes continue changing the data stored there. You also add the output of ``lvdisplay`` to your backup, so you can see the LV sizes in case you ever need to recreate and restore them. 
-After the backup has completed, you remove the snapshots again. :: +After the backup has completed, you remove the snapshots again. + +:: $ # create snapshots here $ lvdisplay > lvdisplay.txt $ borg create --read-special /path/to/repo::arch lvdisplay.txt /dev/vg0/*-snapshot $ # remove snapshots here -Now, let's see how to restore some LVs from such a backup. :: +Now, let's see how to restore some LVs from such a backup. + +:: $ borg extract /path/to/repo::arch lvdisplay.txt $ # create empty LVs with correct sizes here (look into lvdisplay.txt). @@ -138,15 +163,23 @@ reject to delete the repository completely). This is useful for scenarios where backup client machine backups remotely to a backup server using ``borg serve``, since a hacked client machine cannot delete backups on the server permanently. -To activate append-only mode, edit the repository ``config`` file and add a line -``append_only=1`` to the ``[repository]`` section (or edit the line if it exists). +To activate append-only mode, set ``append_only`` to 1 in the repository config: + +:: + + borg config /path/to/repo append_only 1 + +Note that you can go back-and-forth between normal and append-only operation with +``borg config``; it's not a "one way trip." In append-only mode Borg will create a transaction log in the ``transactions`` file, where each line is a transaction and a UTC timestamp. In addition, ``borg serve`` can act as if a repository is in append-only mode with its option ``--append-only``. This can be very useful for fine-tuning access control -in ``.ssh/authorized_keys`` :: +in ``.ssh/authorized_keys``: + +:: command="borg serve --append-only ..." ssh-rsa command="borg serve ..." ssh-rsa @@ -159,7 +192,9 @@ Example +++++++ Suppose an attacker remotely deleted all backups, but your repository was in append-only -mode. A transaction log in this situation might look like this: :: +mode. 
A transaction log in this situation might look like this: + +:: transaction 1, UTC time 2016-03-31T15:53:27.383532 transaction 5, UTC time 2016-03-31T15:53:52.588922 @@ -179,15 +214,28 @@ copy just in case something goes wrong during the recovery. Since recovery is do deleting some files, a hard link copy (``cp -al``) is sufficient. The first step to reset the repository to transaction 5, the last uncompromised transaction, -is to remove the ``hints.N`` and ``index.N`` files in the repository (these two files are -always expendable). In this example N is 13. +is to remove the ``hints.N``, ``index.N`` and ``integrity.N`` files in the repository (these +files are always expendable). In this example N is 13. Then remove or move all segment files from the segment directories in ``data/`` starting with file 6:: rm data/**/{6..13} -That's all to it. +That's all to do in the repository. + +If you want to access this rolled-back repository from a client that already has +a cache for this repository, the cache will reflect a newer repository state +than what you actually have in the repository now, after the rollback. + +Thus, you need to clear the cache:: + + borg delete --cache-only repo + +The cache will get rebuilt automatically. Depending on repo size and archive +count, it may take a while. + +You also will need to remove ~/.config/borg/security/REPOID/manifest-timestamp. Drawbacks +++++++++ @@ -198,10 +246,19 @@ won't free disk space, they merely tag data as deleted in a new transaction. Be aware that as soon as you write to the repo in non-append-only mode (e.g. prune, delete or create archives from an admin machine), it will remove the deleted objects permanently (including the ones that were already marked as deleted, but not removed, -in append-only mode). +in append-only mode). Automated edits to the repository (such as a cron job running +``borg prune``) will render append-only mode moot if data is deleted. 
+ +Even if an archive appears to be available, it is possible an attacker could delete +just a few chunks from an archive and silently corrupt its data. While in append-only +mode, this is reversible, but ``borg check`` should be run before a writing/pruning +operation on an append-only repository to catch accidental or malicious corruption:: + + # run without append-only mode + borg check --verify-data repo -Note that you can go back-and-forth between normal and append-only operation by editing -the configuration file, it's not a "one way trip". +Aside from checking repository & archive integrity you may want to also manually check +backups to ensure their content seems correct. Further considerations ++++++++++++++++++++++ diff --git a/docs/usage/prune.rst.inc b/docs/usage/prune.rst.inc index f98fbab847..dc42e18a37 100644 --- a/docs/usage/prune.rst.inc +++ b/docs/usage/prune.rst.inc @@ -21,7 +21,7 @@ borg prune +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-n``, ``--dry-run`` | do not change repository | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--force`` | force pruning of corrupted archives | + | | ``--force`` | force pruning of corrupted archives, use ``--force --force`` in case ``--force`` does not work. 
| +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ | | ``-s``, ``--stats`` | print statistics for the deleted archive | +-----------------------------------------------------------------------------+---------------------------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -72,7 +72,7 @@ borg prune optional arguments -n, --dry-run do not change repository - --force force pruning of corrupted archives + --force force pruning of corrupted archives, use ``--force --force`` in case ``--force`` does not work. -s, --stats print statistics for the deleted archive --list output verbose list of archives it keeps/prunes --keep-within INTERVAL keep all archives within this time interval @@ -134,4 +134,9 @@ negative number of archives to keep means that there is no limit. The ``--keep-last N`` option is doing the same as ``--keep-secondly N`` (and it will keep the last N archives under the assumption that you do not create more than one -backup archive in the same second). \ No newline at end of file +backup archive in the same second). + +When using ``--stats``, you will get some statistics about how much data was +deleted - the "Deleted data" deduplicated size there is most interesting as +that is how much your repository will shrink. +Please note that the "All archives" stats refer to the state after pruning. \ No newline at end of file diff --git a/docs/usage/recreate.rst.inc b/docs/usage/recreate.rst.inc index ef258e6941..05dab0e2a6 100644 --- a/docs/usage/recreate.rst.inc +++ b/docs/usage/recreate.rst.inc @@ -12,59 +12,59 @@ borg recreate .. 
class:: borg-options-table - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | **positional arguments** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``REPOSITORY_OR_ARCHIVE`` | repository/archive to recreate | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``PATH`` | paths to recreate; patterns are supported | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | **optional arguments** | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--list`` | output verbose list of items (files, dirs, ...) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--filter STATUSCHARS`` | only display items with the given status characters (listed in borg create --help) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-n``, ``--dry-run`` | do not change anything | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-s``, ``--stats`` | print statistics at end | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | .. class:: borg-common-opt-ref | - | | - | :ref:`common_options` | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | **Exclusion options** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | **Archive options** | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--target TARGET`` | create a new archive with the name ARCHIVE, do not replace existing archive (only applies for a single archive) | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-c SECONDS``, ``--checkpoint-interval SECONDS`` | write checkpoint every SECONDS seconds (Default: 1800) | - 
+-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--comment COMMENT`` | add a comment text to the archive | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--timestamp TIMESTAMP`` | manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). alternatively, give a reference file/directory. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``-C COMPRESSION``, ``--compression COMPRESSION`` | select compression algorithm, see the output of the "borg help compression" command for details. | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--recompress`` | recompress data chunks according to ``--compression`` if `if-different`. 
When `always`, chunks that are already compressed that way are not skipped, but compressed again. Only the algorithm is considered for `if-different`, not the compression level (if any). | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - | | ``--chunker-params PARAMS`` | specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or `default` to use the current defaults. default: 19,23,21,4095 | - +-------------------------------------------------------+---------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **positional arguments** | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``REPOSITORY_OR_ARCHIVE`` | repository or archive to recreate | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``PATH`` | paths to recreate; patterns are supported | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **optional arguments** | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--list`` | output verbose list of items (files, dirs, ...) | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--filter STATUSCHARS`` | only display items with the given status characters (listed in borg create --help) | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-n``, ``--dry-run`` | do not change anything | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-s``, ``--stats`` | print statistics at end | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | .. 
class:: borg-common-opt-ref | + | | + | :ref:`common_options` | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Exclusion options** | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-e PATTERN``, ``--exclude PATTERN`` | exclude paths matching PATTERN | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-from EXCLUDEFILE`` | read exclude patterns from EXCLUDEFILE, one per line | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--pattern PATTERN`` | experimental: include/exclude paths matching PATTERN | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--patterns-from PATTERNFILE`` | experimental: read include/exclude patterns from PATTERNFILE, one per line | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-caches`` | exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--exclude-if-present NAME`` | exclude directories that are tagged by containing a filesystem object with the given NAME | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--keep-exclude-tags``, ``--keep-tag-files`` | if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | **Archive options** | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--target TARGET`` | create a new archive with the name ARCHIVE, do not replace existing archive (only applies for a single archive) | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-c SECONDS``, ``--checkpoint-interval SECONDS`` | write checkpoint every SECONDS seconds (Default: 1800) | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--comment COMMENT`` | add a comment text to the archive | + 
+-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--timestamp TIMESTAMP`` | manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). alternatively, give a reference file/directory. | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``-C COMPRESSION``, ``--compression COMPRESSION`` | select compression algorithm, see the output of the "borg help compression" command for details. | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--recompress MODE`` | recompress data chunks according to ``--compression``. MODE `if-different`: recompress if current compression is with a different compression algorithm (the level is not considered). 
MODE `always`: recompress even if current compression is with the same compression algorithm (use this to change the compression level). MODE `never` (default): do not recompress. | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ + | | ``--chunker-params PARAMS`` | specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or `default` to use the current defaults. default: 19,23,21,4095 | + +-------------------------------------------------------+---------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ .. raw:: html @@ -77,7 +77,7 @@ borg recreate .. 
only:: latex REPOSITORY_OR_ARCHIVE - repository/archive to recreate + repository or archive to recreate PATH paths to recreate; patterns are supported @@ -97,7 +97,7 @@ borg recreate --exclude-from EXCLUDEFILE read exclude patterns from EXCLUDEFILE, one per line --pattern PATTERN experimental: include/exclude paths matching PATTERN --patterns-from PATTERNFILE experimental: read include/exclude patterns from PATTERNFILE, one per line - --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.brynosaurus.com/cachedir/spec.html) + --exclude-caches exclude directories that contain a CACHEDIR.TAG file (http://www.bford.info/cachedir/spec.html) --exclude-if-present NAME exclude directories that are tagged by containing a filesystem object with the given NAME --keep-exclude-tags, --keep-tag-files if tag objects are specified with ``--exclude-if-present``, don't omit the tag objects themselves from the backup archive @@ -108,7 +108,7 @@ borg recreate --comment COMMENT add a comment text to the archive --timestamp TIMESTAMP manually specify the archive creation date/time (UTC, yyyy-mm-ddThh:mm:ss format). alternatively, give a reference file/directory. -C COMPRESSION, --compression COMPRESSION select compression algorithm, see the output of the "borg help compression" command for details. - --recompress recompress data chunks according to ``--compression`` if `if-different`. When `always`, chunks that are already compressed that way are not skipped, but compressed again. Only the algorithm is considered for `if-different`, not the compression level (if any). + --recompress MODE recompress data chunks according to ``--compression``. MODE `if-different`: recompress if current compression is with a different compression algorithm (the level is not considered). MODE `always`: recompress even if current compression is with the same compression algorithm (use this to change the compression level). MODE `never` (default): do not recompress. 
--chunker-params PARAMS specify the chunker parameters (CHUNK_MIN_EXP, CHUNK_MAX_EXP, HASH_MASK_BITS, HASH_WINDOW_SIZE) or `default` to use the current defaults. default: 19,23,21,4095 @@ -117,11 +117,13 @@ Description Recreate the contents of existing archives. -This is an *experimental* feature. Do *not* use this on your only backup. +recreate is a potentially dangerous function and might lead to data loss +(if used wrongly). BE VERY CAREFUL! -``--exclude``, ``--exclude-from``, ``--exclude-if-present``, ``--keep-exclude-tags``, and PATH -have the exact same semantics as in "borg create". If PATHs are specified the -resulting archive will only contain files from these PATHs. +``--exclude``, ``--exclude-from``, ``--exclude-if-present``, ``--keep-exclude-tags`` +and PATH have the exact same semantics as in "borg create", but they only check +for files in the archives and not in the local file system. If PATHs are specified, +the resulting archives will only contain files from these PATHs. Note that all paths in an archive are relative, therefore absolute patterns/paths will *not* match (``--exclude``, ``--exclude-from``, PATHs). @@ -139,7 +141,7 @@ Borg 1.x archives. Depending on the PATHs and patterns given, recreate can be used to permanently delete files from archives. When in doubt, use ``--dry-run --verbose --list`` to see how patterns/PATHS are -interpreted. +interpreted. See :ref:`list_item_flags` in ``borg create`` for details. The archive being recreated is only removed after the operation completes. The archive that is built during the operation exists at the same time at @@ -150,4 +152,15 @@ With ``--target`` the original archive is not replaced, instead a new archive is When rechunking space usage can be substantial, expect at least the entire deduplicated size of the archives using the previous chunker params. When recompressing expect approx. (throughput / checkpoint-interval) in space usage, -assuming all chunks are recompressed. 
\ No newline at end of file +assuming all chunks are recompressed. + +If you recently ran borg check --repair and it had to fix lost chunks with all-zero +replacement chunks, please first run another backup for the same data and re-run +borg check --repair afterwards to heal any archives that had lost chunks which are +still generated from the input data. + +Important: running borg recreate to re-chunk will remove the chunks_healthy +metadata of all items with replacement chunks, so healing will not be possible +any more after re-chunking (it is also unlikely it would ever work: due to the +change of chunking parameters, the missing chunk likely will never be seen again +even if you still have the data that produced it). \ No newline at end of file diff --git a/docs/usage/serve.rst b/docs/usage/serve.rst index 7adc8f66d0..94ab987404 100644 --- a/docs/usage/serve.rst +++ b/docs/usage/serve.rst @@ -39,3 +39,33 @@ locations like ``/etc/environment`` or in the forced command itself (example bel has been introduced in v7.2. We recommend to use ``no-port-forwarding,no-X11-forwarding,no-pty,no-agent-forwarding,no-user-rc`` in this case. + + +SSH Configuration +~~~~~~~~~~~~~~~~~ +``borg serve``'s pipes (``stdin``/``stdout``/``stderr``) are connected to the ``sshd`` process on the server side. In the event that the SSH connection between ``borg serve`` and the client is disconnected or stuck abnormally (for example, due to a network outage), it can take a long time for ``sshd`` to notice the client is disconnected. In the meantime, ``sshd`` continues running, and as a result so does the ``borg serve`` process holding the lock on the repository. This can cause subsequent ``borg`` operations on the remote repository to fail with the error: ``Failed to create/acquire the lock``. 
+ +In order to avoid this, it is recommended to perform the following additional SSH configuration: + +Either in the client side's ``~/.ssh/config`` file, or in the client's ``/etc/ssh/ssh_config`` file: +:: + + Host backupserver + ServerAliveInterval 10 + ServerAliveCountMax 30 + +Replacing ``backupserver`` with the hostname, FQDN or IP address of the borg server. + +This will cause the client to send a keepalive to the server every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the ssh client process will be terminated, causing the borg process to terminate gracefully. + +On the server side's ``sshd`` configuration file (typically ``/etc/ssh/sshd_config``): +:: + + ClientAliveInterval 10 + ClientAliveCountMax 30 + +This will cause the server to send a keepalive to the client every 10 seconds. If 30 consecutive keepalives are sent without a response (a time of 300 seconds), the server's sshd process will be terminated, causing the ``borg serve`` process to terminate gracefully and release the lock on the repository. + +If you then run borg commands with ``--lock-wait 600``, this gives sufficient time for the borg serve processes to terminate after the SSH connection is torn down after the 300 second wait for the keepalives to fail. + +You may, of course, modify the timeout values demonstrated above to values that suit your environment and use case.
diff --git a/docs/usage/tar.rst b/docs/usage/tar.rst index 3b63afb4d8..dc79b708ef 100644 --- a/docs/usage/tar.rst +++ b/docs/usage/tar.rst @@ -11,11 +11,11 @@ Examples $ borg export-tar /path/to/repo::Monday Monday.tar.gz --exclude '*.so' # use higher compression level with gzip - $ borg export-tar testrepo::linux --tar-filter="gzip -9" Monday.tar.gz + $ borg export-tar --tar-filter="gzip -9" testrepo::linux Monday.tar.gz - # export a gzipped tar, but instead of storing it on disk, + # export a tar, but instead of storing it on disk, # upload it to a remote site using curl. - $ borg export-tar ... --tar-filter="gzip" - | curl --data-binary @- https://somewhere/to/POST + $ borg export-tar /path/to/repo::Monday - | curl --data-binary @- https://somewhere/to/POST # remote extraction via "tarpipe" $ borg export-tar /path/to/repo::Monday - | ssh somewhere "cd extracted; tar x" diff --git a/docs/usage/upgrade.rst.inc b/docs/usage/upgrade.rst.inc index eafa4362ae..9f94d2cf85 100644 --- a/docs/usage/upgrade.rst.inc +++ b/docs/usage/upgrade.rst.inc @@ -126,17 +126,17 @@ due to the cache resync. Upgrade should be able to resume if interrupted, although it will still iterate over all segments. If you want to start from scratch, use `borg delete` over the copied repository to -make sure the cache files are also removed: +make sure the cache files are also removed:: borg delete borg -Unless ``--inplace`` is specified, the upgrade process first -creates a backup copy of the repository, in -REPOSITORY.before-upgrade-DATETIME, using hardlinks. This takes -longer than in place upgrades, but is much safer and gives -progress information (as opposed to ``cp -al``). Once you are -satisfied with the conversion, you can safely destroy the -backup copy. +Unless ``--inplace`` is specified, the upgrade process first creates a backup +copy of the repository, in REPOSITORY.before-upgrade-DATETIME, using hardlinks. 
+This requires that the repository and its parent directory reside on same +filesystem so the hardlink copy can work. +This takes longer than in place upgrades, but is much safer and gives +progress information (as opposed to ``cp -al``). Once you are satisfied +with the conversion, you can safely destroy the backup copy. WARNING: Running the upgrade in place will make the current copy unusable with older version, with no way of going back diff --git a/docs/usage/usage_general.rst.inc b/docs/usage/usage_general.rst.inc new file mode 100644 index 0000000000..9334f3d216 --- /dev/null +++ b/docs/usage/usage_general.rst.inc @@ -0,0 +1,27 @@ +.. include:: general/positional-arguments.rst.inc + +.. include:: general/repository-urls.rst.inc + +.. include:: general/repository-locations.rst.inc + +.. include:: general/logging.rst.inc + +.. include:: general/return-codes.rst.inc + +.. _env_vars: + +.. include:: general/environment.rst.inc + +.. _file-systems: + +.. include:: general/file-systems.rst.inc + +.. include:: general/units.rst.inc + +.. include:: general/date-time.rst.inc + +.. include:: general/resources.rst.inc + +.. _platforms: + +.. include:: general/file-metadata.rst.inc diff --git a/docs/usage/with-lock.rst.inc b/docs/usage/with-lock.rst.inc index fc45411641..6f4fc741af 100644 --- a/docs/usage/with-lock.rst.inc +++ b/docs/usage/with-lock.rst.inc @@ -60,5 +60,6 @@ code as borg's return code. .. note:: If you copy a repository with the lock held, the lock will be present in - the copy, obviously. Thus, before using borg on the copy, you need to - use "borg break-lock" on it. \ No newline at end of file + the copy. Thus, before using borg on the copy from a different host, + you need to use "borg break-lock" on the copied repository, because + Borg is cautious and does not automatically remove stale locks made by a different host. 
\ No newline at end of file diff --git a/docs/usage_general.rst.inc b/docs/usage_general.rst.inc index 3c61914388..88613e1f45 100644 --- a/docs/usage_general.rst.inc +++ b/docs/usage_general.rst.inc @@ -1,421 +1,21 @@ -Repository URLs -~~~~~~~~~~~~~~~ +.. include:: usage/general/positional-arguments.rst.inc -**Local filesystem** (or locally mounted network filesystem): +.. include:: usage/general/repository-urls.rst.inc -``/path/to/repo`` - filesystem path to repo directory, absolute path +.. include:: usage/general/repository-locations.rst.inc -``path/to/repo`` - filesystem path to repo directory, relative path +.. include:: usage/general/logging.rst.inc -Also, stuff like ``~/path/to/repo`` or ``~other/path/to/repo`` works (this is -expanded by your shell). +.. include:: usage/general/return-codes.rst.inc -Note: you may also prepend a ``file://`` to a filesystem path to get URL style. +.. include:: usage/general/environment.rst.inc -**Remote repositories** accessed via ssh user@host: +.. include:: usage/general/file-systems.rst.inc -``user@host:/path/to/repo`` - remote repo, absolute path +.. include:: usage/general/units.rst.inc -``ssh://user@host:port/path/to/repo`` - same, alternative syntax, port can be given +.. include:: usage/general/date-time.rst.inc +.. include:: usage/general/resources.rst.inc -**Remote repositories with relative paths** can be given using this syntax: - -``user@host:path/to/repo`` - path relative to current directory - -``user@host:~/path/to/repo`` - path relative to user's home directory - -``user@host:~other/path/to/repo`` - path relative to other's home directory - -Note: giving ``user@host:/./path/to/repo`` or ``user@host:/~/path/to/repo`` or -``user@host:/~other/path/to/repo`` is also supported, but not required here. 
- - -**Remote repositories with relative paths, alternative syntax with port**: - -``ssh://user@host:port/./path/to/repo`` - path relative to current directory - -``ssh://user@host:port/~/path/to/repo`` - path relative to user's home directory - -``ssh://user@host:port/~other/path/to/repo`` - path relative to other's home directory - - -If you frequently need the same repo URL, it is a good idea to set the -``BORG_REPO`` environment variable to set a default for the repo URL: - -:: - - export BORG_REPO='ssh://user@host:port/path/to/repo' - -Then just leave away the repo URL if only a repo URL is needed and you want -to use the default - it will be read from BORG_REPO then. - -Use ``::`` syntax to give the repo URL when syntax requires giving a positional -argument for the repo (e.g. ``borg mount :: /mnt``). - - -Repository / Archive Locations -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Many commands want either a repository (just give the repo URL, see above) or -an archive location, which is a repo URL followed by ``::archive_name``. - -Archive names must not contain the ``/`` (slash) character. For simplicity, -maybe also avoid blanks or other characters that have special meaning on the -shell or in a filesystem (borg mount will use the archive name as directory -name). - -If you have set BORG_REPO (see above) and an archive location is needed, use -``::archive_name`` - the repo URL part is then read from BORG_REPO. - - -Type of log output -~~~~~~~~~~~~~~~~~~ - -The log level of the builtin logging configuration defaults to WARNING. -This is because we want Borg to be mostly silent and only output -warnings, errors and critical messages, unless output has been requested -by supplying an option that implies output (e.g. ``--list`` or ``--progress``). - -Log levels: DEBUG < INFO < WARNING < ERROR < CRITICAL - -Use ``--debug`` to set DEBUG log level - -to get debug, info, warning, error and critical level output. 
- -Use ``--info`` (or ``-v`` or ``--verbose``) to set INFO log level - -to get info, warning, error and critical level output. - -Use ``--warning`` (default) to set WARNING log level - -to get warning, error and critical level output. - -Use ``--error`` to set ERROR log level - -to get error and critical level output. - -Use ``--critical`` to set CRITICAL log level - -to get critical level output. - -While you can set misc. log levels, do not expect that every command will -give different output on different log levels - it's just a possibility. - -.. warning:: Options ``--critical`` and ``--error`` are provided for completeness, - their usage is not recommended as you might miss important information. - -Return codes -~~~~~~~~~~~~ - -Borg can exit with the following return codes (rc): - -=========== ======= -Return code Meaning -=========== ======= -0 success (logged as INFO) -1 warning (operation reached its normal end, but there were warnings -- - you should check the log, logged as WARNING) -2 error (like a fatal error, a local or remote exception, the operation - did not reach its normal end, logged as ERROR) -128+N killed by signal N (e.g. 137 == kill -9) -=========== ======= - -If you use ``--show-rc``, the return code is also logged at the indicated -level as the last log entry. - -.. _env_vars: - -Environment Variables -~~~~~~~~~~~~~~~~~~~~~ - -Borg uses some environment variables for automation: - -General: - BORG_REPO - When set, use the value to give the default repository location. If a command needs an archive - parameter, you can abbreviate as ``::archive``. If a command needs a repository parameter, you - can either leave it away or abbreviate as ``::``, if a positional parameter is required. - BORG_PASSPHRASE - When set, use the value to answer the passphrase question for encrypted repositories. 
- It is used when a passphrase is needed to access an encrypted repo as well as when a new - passphrase should be initially set when initializing an encrypted repo. - See also BORG_NEW_PASSPHRASE. - BORG_PASSCOMMAND - When set, use the standard output of the command (trailing newlines are stripped) to answer the - passphrase question for encrypted repositories. - It is used when a passphrase is needed to access an encrypted repo as well as when a new - passphrase should be initially set when initializing an encrypted repo. - If BORG_PASSPHRASE is also set, it takes precedence. - See also BORG_NEW_PASSPHRASE. - BORG_NEW_PASSPHRASE - When set, use the value to answer the passphrase question when a **new** passphrase is asked for. - This variable is checked first. If it is not set, BORG_PASSPHRASE and BORG_PASSCOMMAND will also - be checked. - Main usecase for this is to fully automate ``borg change-passphrase``. - BORG_DISPLAY_PASSPHRASE - When set, use the value to answer the "display the passphrase for verification" question when defining a new passphrase for encrypted repositories. - BORG_HOSTNAME_IS_UNIQUE=no - Borg assumes that it can derive a unique hostname / identity (see ``borg debug info``). - If this is not the case or you do not want Borg to automatically remove stale locks, - set this to *no*. - BORG_LOGGING_CONF - When set, use the given filename as INI_-style logging configuration. - BORG_RSH - When set, use this command instead of ``ssh``. This can be used to specify ssh options, such as - a custom identity file ``ssh -i /path/to/private/key``. See ``man ssh`` for other options. - BORG_REMOTE_PATH - When set, use the given path as borg executable on the remote (defaults to "borg" if unset). - Using ``--remote-path PATH`` commandline option overrides the environment variable. - BORG_FILES_CACHE_TTL - When set to a numeric value, this determines the maximum "time to live" for the files cache - entries (default: 20). 
The files cache is used to quickly determine whether a file is unchanged. - The FAQ explains this more detailed in: :ref:`always_chunking` - TMPDIR - where temporary files are stored (might need a lot of temporary space for some operations) - -Some automatic "answerers" (if set, they automatically answer confirmation questions): - BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=no (or =yes) - For "Warning: Attempting to access a previously unknown unencrypted repository" - BORG_RELOCATED_REPO_ACCESS_IS_OK=no (or =yes) - For "Warning: The repository at location ... was previously located at ..." - BORG_CHECK_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) - For "Warning: 'check --repair' is an experimental feature that might result in data loss." - BORG_DELETE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) - For "You requested to completely DELETE the repository *including* all archives it contains:" - BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING=NO (or =YES) - For "recreate is an experimental feature." - - Note: answers are case sensitive. setting an invalid answer value might either give the default - answer or ask you interactively, depending on whether retries are allowed (they by default are - allowed). So please test your scripts interactively before making them a non-interactive script. - -Directories and files: - BORG_KEYS_DIR - Default to '~/.config/borg/keys'. This directory contains keys for encrypted repositories. - BORG_KEY_FILE - When set, use the given filename as repository key file. - BORG_SECURITY_DIR - Default to '~/.config/borg/security'. This directory contains information borg uses to - track its usage of NONCES ("numbers used once" - usually in encryption context) and other - security relevant data. - BORG_CACHE_DIR - Default to '~/.cache/borg'. This directory contains the local cache and might need a lot - of space for dealing with big repositories). - -Building: - BORG_OPENSSL_PREFIX - Adds given OpenSSL header file directory to the default locations (setup.py). 
- BORG_LZ4_PREFIX - Adds given LZ4 header file directory to the default locations (setup.py). - BORG_LIBB2_PREFIX - Adds given prefix directory to the default locations. If a 'include/blake2.h' is found Borg - will be linked against the system libb2 instead of a bundled implementation. (setup.py) - - -Please note: - -- be very careful when using the "yes" sayers, the warnings with prompt exist for your / your data's security/safety -- also be very careful when putting your passphrase into a script, make sure it has appropriate file permissions - (e.g. mode 600, root:root). - - -.. _INI: https://docs.python.org/3.4/library/logging.config.html#configuration-file-format - -.. _file-systems: - -File systems -~~~~~~~~~~~~ - -We strongly recommend against using Borg (or any other database-like -software) on non-journaling file systems like FAT, since it is not -possible to assume any consistency in case of power failures (or a -sudden disconnect of an external drive or similar failures). - -While Borg uses a data store that is resilient against these failures -when used on journaling file systems, it is not possible to guarantee -this with some hardware -- independent of the software used. We don't -know a list of affected hardware. - -If you are suspicious whether your Borg repository is still consistent -and readable after one of the failures mentioned above occurred, run -``borg check --verify-data`` to make sure it is consistent. - -.. rubric:: Requirements for Borg repository file systems - -- Long file names -- At least three directory levels with short names -- Typically, file sizes up to a few hundred MB. - Large repositories may require large files (>2 GB). -- Up to 1000 files per directory (10000 for repositories initialized with Borg 1.0) -- mkdir(2) should be atomic, since it is used for locking -- Hardlinks are needed for :ref:`borg_upgrade` ``--inplace`` - -Units -~~~~~ - -To display quantities, Borg takes care of respecting the -usual conventions of scale. 
Disk sizes are displayed in `decimal -`_, using powers of ten (so -``kB`` means 1000 bytes). For memory usage, `binary prefixes -`_ are used, and are -indicated using the `IEC binary prefixes -`_, -using powers of two (so ``KiB`` means 1024 bytes). - -Date and Time -~~~~~~~~~~~~~ - -We format date and time conforming to ISO-8601, that is: YYYY-MM-DD and -HH:MM:SS (24h clock). - -For more information about that, see: https://xkcd.com/1179/ - -Unless otherwise noted, we display local date and time. -Internally, we store and process date and time as UTC. - -Resource Usage -~~~~~~~~~~~~~~ - -Borg might use a lot of resources depending on the size of the data set it is dealing with. - -If one uses Borg in a client/server way (with a ssh: repository), -the resource usage occurs in part on the client and in another part on the -server. - -If one uses Borg as a single process (with a filesystem repo), -all the resource usage occurs in that one process, so just add up client + -server to get the approximate resource usage. - -CPU client: - borg create: does chunking, hashing, compression, crypto (high CPU usage) - chunks cache sync: quite heavy on CPU, doing lots of hashtable operations. - borg extract: crypto, decompression (medium to high CPU usage) - borg check: similar to extract, but depends on options given. - borg prune / borg delete archive: low to medium CPU usage - borg delete repo: done on the server - It won't go beyond 100% of 1 core as the code is currently single-threaded. - Especially higher zlib and lzma compression levels use significant amounts - of CPU cycles. Crypto might be cheap on the CPU (if hardware accelerated) or - expensive (if not). - -CPU server: - It usually doesn't need much CPU, it just deals with the key/value store - (repository) and uses the repository index for that. 
- - borg check: the repository check computes the checksums of all chunks - (medium CPU usage) - borg delete repo: low CPU usage - -CPU (only for client/server operation): - When using borg in a client/server way with a ssh:-type repo, the ssh - processes used for the transport layer will need some CPU on the client and - on the server due to the crypto they are doing - esp. if you are pumping - big amounts of data. - -Memory (RAM) client: - The chunks index and the files index are read into memory for performance - reasons. Might need big amounts of memory (see below). - Compression, esp. lzma compression with high levels might need substantial - amounts of memory. - -Memory (RAM) server: - The server process will load the repository index into memory. Might need - considerable amounts of memory, but less than on the client (see below). - -Chunks index (client only): - Proportional to the amount of data chunks in your repo. Lots of chunks - in your repo imply a big chunks index. - It is possible to tweak the chunker params (see create options). - -Files index (client only): - Proportional to the amount of files in your last backups. Can be switched - off (see create options), but next backup might be much slower if you do. - The speed benefit of using the files cache is proportional to file size. - -Repository index (server only): - Proportional to the amount of data chunks in your repo. Lots of chunks - in your repo imply a big repository index. - It is possible to tweak the chunker params (see create options) to - influence the amount of chunks being created. - -Temporary files (client): - Reading data and metadata from a FUSE mounted repository will consume up to - the size of all deduplicated, small chunks in the repository. Big chunks - won't be locally cached. - -Temporary files (server): - None. 
- -Cache files (client only): - Contains the chunks index and files index (plus a collection of single- - archive chunk indexes which might need huge amounts of disk space, - depending on archive count and size - see FAQ about how to reduce). - -Network (only for client/server operation): - If your repository is remote, all deduplicated (and optionally compressed/ - encrypted) data of course has to go over the connection (``ssh://`` repo url). - If you use a locally mounted network filesystem, additionally some copy - operations used for transaction support also go over the connection. If - you backup multiple sources to one target repository, additional traffic - happens for cache resynchronization. - -.. _platforms: - -Support for file metadata -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Besides regular file and directory structures, Borg can preserve - -* symlinks (stored as symlink, the symlink is not followed) -* special files: - - * character and block device files (restored via mknod) - * FIFOs ("named pipes") - * special file *contents* can be backed up in ``--read-special`` mode. - By default the metadata to create them with mknod(2), mkfifo(2) etc. is stored. -* hardlinked regular files, devices, FIFOs (considering all items in the same archive) -* timestamps in nanosecond precision: mtime, atime, ctime -* permissions: - - * IDs of owning user and owning group - * names of owning user and owning group (if the IDs can be resolved) - * Unix Mode/Permissions (u/g/o permissions, suid, sgid, sticky) - -On some platforms additional features are supported: - -.. Yes/No's are grouped by reason/mechanism/reference. 
- -+------------------+----------+-----------+------------+ -| Platform | ACLs | xattr | Flags | -| | [#acls]_ | [#xattr]_ | [#flags]_ | -+==================+==========+===========+============+ -| Linux | Yes | Yes | Yes [1]_ | -+------------------+----------+-----------+------------+ -| Mac OS X | Yes | Yes | Yes (all) | -+------------------+----------+-----------+ | -| FreeBSD | Yes | Yes | | -+------------------+----------+-----------+ | -| OpenBSD | n/a | n/a | | -+------------------+----------+-----------+ | -| NetBSD | n/a | No [2]_ | | -+------------------+----------+-----------+------------+ -| Solaris 11 | No [3]_ | n/a | -+------------------+ | | -| OpenIndiana | | | -+------------------+----------+-----------+------------+ -| Windows (cygwin) | No [4]_ | No | No | -+------------------+----------+-----------+------------+ - -Other Unix-like operating systems may work as well, but have not been tested at all. - -Note that most of the platform-dependent features also depend on the file system. -For example, ntfs-3g on Linux isn't able to convey NTFS ACLs. - -.. [1] Only "nodump", "immutable", "compressed" and "append" are supported. - Feature request :issue:`618` for more flags. -.. [2] Feature request :issue:`1332` -.. [3] Feature request :issue:`1337` -.. [4] Cygwin tries to map NTFS ACLs to permissions with varying degress of success. - -.. [#acls] The native access control list mechanism of the OS. This normally limits access to - non-native ACLs. For example, NTFS ACLs aren't completely accessible on Linux with ntfs-3g. -.. [#xattr] extended attributes; key-value pairs attached to a file, mainly used by the OS. - This includes resource forks on Mac OS X. -.. [#flags] aka *BSD flags*. The Linux set of flags [1]_ is portable across platforms. - The BSDs define additional flags. +.. 
include:: usage/general/file-metadata.rst.inc diff --git a/requirements.d/development.lock.txt b/requirements.d/development.lock.txt new file mode 100644 index 0000000000..875aaa17d1 --- /dev/null +++ b/requirements.d/development.lock.txt @@ -0,0 +1,13 @@ +setuptools==50.3.0 +setuptools-scm==4.1.2 +pip==20.2.3 +virtualenv==20.0.33 +importlib-metadata==1.7.0 +pluggy==0.13.1 +tox==3.20.0 +pytest==6.1.1 +pytest-xdist==2.1.0 +pytest-cov==2.10.1 +pytest-benchmark==3.2.3 +Cython==0.29.21 +twine==1.15.0 diff --git a/requirements.d/development.txt b/requirements.d/development.txt index 5805cb4a11..5a9a64f1b8 100644 --- a/requirements.d/development.txt +++ b/requirements.d/development.txt @@ -8,3 +8,4 @@ pytest-xdist pytest-cov pytest-benchmark Cython +twine diff --git a/requirements.d/fuse.txt b/requirements.d/fuse.txt index 0df0f33813..a8a479c695 100644 --- a/requirements.d/fuse.txt +++ b/requirements.d/fuse.txt @@ -1,4 +1,3 @@ # low-level FUSE support library for "borg mount" # please see the comments in setup.py about llfuse. -llfuse<2.0 - +llfuse >=1.3.4 diff --git a/scripts/borg.exe.spec b/scripts/borg.exe.spec index ea86a91d4d..bdde103036 100644 --- a/scripts/borg.exe.spec +++ b/scripts/borg.exe.spec @@ -3,7 +3,9 @@ import os, sys -basepath = '/vagrant/borg/borg' +# Note: SPEC contains the spec file argument given to pyinstaller +here = os.path.dirname(os.path.abspath(SPEC)) +basepath = os.path.abspath(os.path.join(here, '..')) block_cipher = None @@ -41,15 +43,24 @@ exe = EXE(pyz, upx=True, console=True ) -if False: - # Enable this block to build a directory-based binary instead of - # a packed single file. This allows to easily look at all included - # files (e.g. without having to strace or halt the built binary - # and introspect /tmp). - coll = COLLECT(exe, - a.binaries, - a.zipfiles, - a.datas, - strip=False, - upx=True, - name='borg-dir') +# Build a directory-based binary in addition to a packed +# single file. 
This allows one to easily look at all included +# files (e.g. without having to strace or halt the built binary +# and introspect /tmp). Also avoids unpacking all libs when +# running the app, which is better for app signing on various OS. +slim_exe = EXE(pyz, + a.scripts, + exclude_binaries=True, + name='borg.exe', + debug=False, + strip=False, + upx=False, + console=True) + +coll = COLLECT(slim_exe, + a.binaries, + a.zipfiles, + a.datas, + strip=False, + upx=False, + name='borg-dir') diff --git a/scripts/errorlist.py b/scripts/errorlist.py index bd33faf441..c4a0e7a0e5 100755 --- a/scripts/errorlist.py +++ b/scripts/errorlist.py @@ -2,7 +2,7 @@ from textwrap import indent -import borg.archiver +import borg.archiver # noqa: F401 - need import to get Error and ErrorWithTraceback subclasses. from borg.helpers import Error, ErrorWithTraceback classes = Error.__subclasses__() + ErrorWithTraceback.__subclasses__() diff --git a/scripts/fuzz-cache-sync/testcase_dir/test_simple b/scripts/fuzz-cache-sync/testcase_dir/test_simple index 0bf5a0ea16..d5f6670c15 100644 Binary files a/scripts/fuzz-cache-sync/testcase_dir/test_simple and b/scripts/fuzz-cache-sync/testcase_dir/test_simple differ diff --git a/scripts/glibc_check.py b/scripts/glibc_check.py index 02be4aac4e..eb198ed569 100755 --- a/scripts/glibc_check.py +++ b/scripts/glibc_check.py @@ -41,7 +41,7 @@ def main(): overall_versions.add(requires_glibc) if verbose: print("%s %s" % (filename, format_version(requires_glibc))) - except subprocess.CalledProcessError as e: + except subprocess.CalledProcessError: if verbose: print("%s errored." 
% filename) diff --git a/scripts/release b/scripts/release deleted file mode 100755 index a67a77560f..0000000000 --- a/scripts/release +++ /dev/null @@ -1,2 +0,0 @@ -python setup.py register sdist upload --identity="Thomas Waldmann" --sign - diff --git a/scripts/sdist-sign b/scripts/sdist-sign new file mode 100755 index 0000000000..50758a30a4 --- /dev/null +++ b/scripts/sdist-sign @@ -0,0 +1,20 @@ +#!/bin/bash + +R=$1 + +if [ "$R" = "" ]; then + echo "Usage: sdist-sign 1.2.3" + exit +fi + +if [ "$QUBES_GPG_DOMAIN" = "" ]; then + GPG=gpg +else + GPG=qubes-gpg-client-wrapper +fi + +python setup.py sdist + +D=dist/borgbackup-$R.tar.gz + +$GPG --detach-sign --local-user "Thomas Waldmann" --armor --output $D.asc $D diff --git a/scripts/shell_completions/bash/borg b/scripts/shell_completions/bash/borg new file mode 100644 index 0000000000..402d31c59e --- /dev/null +++ b/scripts/shell_completions/bash/borg @@ -0,0 +1,179 @@ +# Completions for borg +# https://www.borgbackup.org/ +# Note: +# Listing archives works on password protected repositories only if $BORG_PASSPHRASE is set. 
+# Install: +# Copy this file to /usr/share/bash-completion/completions/ or /etc/bash_completion.d/ + +_borg() +{ + compopt -o default + COMPREPLY=() + local cur="${COMP_WORDS[COMP_CWORD]}" + local prev="${COMP_WORDS[COMP_CWORD-1]}" + local prevprev="${COMP_WORDS[COMP_CWORD-2]}" + local common_opts="-h --help --version --critical --error --warning --info -v --verbose --debug --debug-topic -p --progress --log-json --lock-wait --show-version --show-rc --umask --remote-path --remote-ratelimit --consider-part-files --debug-profile --rsh" + local opts="${common_opts}" + + # Commands + if [[ ${COMP_CWORD} == 1 ]] ; then + local borg_commands="init create extract check rename list diff delete prune info mount umount key serve upgrade recreate export-tar with-lock break-lock config benchmark help" + COMPREPLY=( $(compgen -W "${borg_commands}" -- ${cur}) ) + compopt +o default + return 0 + fi + + case "${prev}" in + 'key') + COMPREPLY=( $(compgen -W "import export change-passphrase" -- ${cur}) ) + return 0 + ;; + 'benchmark') + COMPREPLY=( $(compgen -W "crud" -- ${cur}) ) + return 0 + ;; + 'help') + COMPREPLY=( $(compgen -W "patterns placeholders compression" -- ${cur}) ) + return 0 + ;; + '--encryption' | '-e') + local encryption_modes="none keyfile keyfile-blake2 repokey repokey-blake2 authenticated authenticated-blake2" + COMPREPLY=( $(compgen -W "${encryption_modes}" -- ${cur}) ) + return 0 + ;; + '--files-cache') + local files_cache_mode="ctime,size,inode mtime,size,inode ctime,size mtime,size rechunk,ctime rechunk,mtime disabled" + COMPREPLY=( $(compgen -W "${files_cache_mode}" -- ${cur}) ) + return 0 + ;; + '--compression' | '-C') + local compression_methods="none auto lz4 zstd,1 zstd,2 zstd,3 zstd,4 zstd,5 zstd,6 zstd,7 zstd,8 zstd,9 zstd,10 zstd,11 zstd,12 zstd,13 zstd,14 zstd,15 zstd,16 zstd,17 zstd,18 zstd,19 zstd,20 zstd,21 zstd,22 zlib,1 zlib,2 zlib,3 zlib,4 zlib,5 zlib,6 zlib,7 zlib,8 zlib,9 lzma,0 lzma,1 lzma,2 lzma,3 lzma,4 lzma,5 lzma,6 lzma,7 lzma,8 
lzma,9" + COMPREPLY=( $(compgen -W "${compression_methods}" -- ${cur}) ) + return 0 + ;; + '--sort-by') + local sort_keys="timestamp name id" + COMPREPLY=( $(compgen -W "${sort_keys}" -- ${cur}) ) + return 0 + ;; + '-o') + # FIXME This list is probably not full, but I tried to pick only those that are relevant to borg mount -o: + local fuse_options="ac_attr_timeout= allow_damaged_files allow_other allow_root attr_timeout= auto auto_cache auto_unmount default_permissions entry_timeout= gid= group_id= kernel_cache max_read= negative_timeout= noauto noforget remember= remount rootmode= uid= umask= user user_id= versions" + COMPREPLY=( $(compgen -W "${fuse_options}" -- ${cur}) ) + return 0 + ;; + '--recompress') + local recompress_when="if-different always never" + COMPREPLY=( $(compgen -W "${recompress_when}" -- ${cur}) ) + return 0 + ;; + esac + + if [[ ${cur} == -* ]] ; then + case "${COMP_LINE}" in + *' init '*) + local opts="-e --encryption --append-only --storage-quota --make-parent-dirs ${common_opts}" + ;; + *' create '*) + local opts="-n --dry-run -s --stats --list --filter --json --no-cache-sync --stdin-name --stdin-user --stdin-group --stdin-mode -e --exclude --exclude-from --pattern --patterns-from --exclude-caches --exclude-if-present --keep-exclude-tags --keep-tag-files --exclude-nodump -x --one-file-system --numeric-owner --noatime --noctime --nobirthtime --nobsdflags --ignore-inode --files-cache --read-special --comment --timestamp -c --checkpoint-interval --chunker-params -C --compression ${common_opts}" + ;; + *' extract '*) + local opts="--list -n --dry-run --numeric-owner --nobsdflags --stdout --sparse -e --exclude --exclude-from --pattern --patterns-from --strip-components ${common_opts}" + ;; + *' check '*) + local opts="--repository-only --archives-only --verify-data --repair --save-space -P --prefix -a --glob-archives --sort-by --first --last ${common_opts}" + ;; + # rename + # no specific options + *" list "*) + local opts="--short 
--list-format --format --json --json-lines -P --prefix -a --glob-archives --sort-by --first --last -e --exclude --exclude-from --pattern --patterns-from ${common_opts}" +            ;; +        *' diff '*) +            local opts="--numeric-owner --same-chunker-params --sort -e --exclude --exclude-from --pattern --patterns-from ${common_opts}" +            ;; +        *' delete '*) +            local opts="-n --dry-run -s --stats --cache-only --force --save-space -P --prefix -a --glob-archives --sort-by --first --last ${common_opts}" +            ;; +        *' prune '*) +            local opts="-n --dry-run --force -s --stats --list --keep-within --keep-last --keep-secondly --keep-minutely -H --keep-hourly -d --keep-daily -w --keep-weekly -m --keep-monthly -y --keep-yearly --save-space -P --prefix -a --glob-archives ${common_opts}" +            ;; +        *' info '*) +            local opts="--json -P --prefix -a --glob-archives --sort-by --first --last ${common_opts}" +            ;; +        *' mount '*) +            local opts="-f --foreground -o -P --prefix -a --glob-archives --sort-by --first --last -e --exclude --exclude-from --pattern --patterns-from --strip-components ${common_opts}" +            ;; +        # umount +        # no specific options +        # key change-passphrase +        # no specific options +        *' export '*) +            local opts="--paper --qr-html ${common_opts}" +            ;; +        *' import '*) +            local opts="--paper ${common_opts}" +            ;; +        *' upgrade '*) +            local opts="-n --dry-run --inplace --force --tam --disable-tam ${common_opts}" +            ;; +        *' recreate '*) +            local opts="--list --filter -n --dry-run -s --stats -e --exclude --exclude-from --pattern --patterns-from --exclude-caches --exclude-if-present --keep-exclude-tags --keep-tag-files --target -c --checkpoint-interval --comment --timestamp -C --compression --recompress --chunker-params ${common_opts}" +            ;; +        *' export-tar '*) +            local opts="--tar-filter --list -e --exclude --exclude-from --pattern --patterns-from --strip-components ${common_opts}" +            ;; +        *' serve '*) +            local opts="--restrict-to-path --restrict-to-repository --append-only --storage-quota ${common_opts}" +            ;; +        *' config 
'*) + local opts="-c --cache -d --delete --list ${common_opts}" + ;; + # with-lock + # no specific options + # break-lock + # no specific options + # benchmark crud + # no specific options + esac + + COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) + return 0 + fi + + # Get the repository name if available + # If there is a space before the "::" it means that no repository name was typed, + # so probably $BORG_REPO was set and we can still list the archives. + local repository_name="${COMP_LINE%%::*}" + repository_name=${repository_name##* } + + # Listing archives. + # Since "::" is treated as separate word in Bash, + # it is $cur when the cursor is right behind it + # and $prev if the user has started to type an archive name. + local typed_word=${cur} + local -i list_archives=0 + if [[ ${cur} == "::" ]] ; then + list_archives=1 + typed_word="" + fi + if [[ ${prev} == "::" ]] ; then + list_archives=1 + fi + # Second archive listing for borg diff + if [[ ${COMP_LINE} =~ ^.*\ diff\ .*::[^\ ]+\ ${cur}$ ]] ; then + list_archives=1 + fi + # Additional archive listing for borg delete + if [[ ${COMP_LINE} =~ ^.*\ delete\ .*::[^\ ]+.*${cur}$ ]] ; then + list_archives=1 + fi + if (( $list_archives )) ; then + local archives=$(borg list --short "${repository_name}" 2>/dev/null) + COMPREPLY=( $(compgen -W "${archives}" -- "${typed_word}" ) ) + return 0 + fi + + return 0 +} + +complete -F _borg borg diff --git a/scripts/shell_completions/fish/borg.fish b/scripts/shell_completions/fish/borg.fish new file mode 100644 index 0000000000..65c17266a6 --- /dev/null +++ b/scripts/shell_completions/fish/borg.fish @@ -0,0 +1,396 @@ +# Completions for borg +# https://www.borgbackup.org/ +# Note: +# Listing archives works on password protected repositories only if $BORG_PASSPHRASE is set. 
+# Install: +# Copy this file to /usr/share/fish/vendor_completions.d/ + +# Commands + +complete -c borg -f -n __fish_is_first_token -a 'init' -d 'Initialize an empty repository' +complete -c borg -f -n __fish_is_first_token -a 'create' -d 'Create new archive' +complete -c borg -f -n __fish_is_first_token -a 'extract' -d 'Extract archive contents' +complete -c borg -f -n __fish_is_first_token -a 'check' -d 'Check repository consistency' +complete -c borg -f -n __fish_is_first_token -a 'rename' -d 'Rename an existing archive' +complete -c borg -f -n __fish_is_first_token -a 'list' -d 'List archive or repository contents' +complete -c borg -f -n __fish_is_first_token -a 'diff' -d 'Find differences between archives' +complete -c borg -f -n __fish_is_first_token -a 'delete' -d 'Delete a repository or archive' +complete -c borg -f -n __fish_is_first_token -a 'prune' -d 'Prune repository archives' +complete -c borg -f -n __fish_is_first_token -a 'info' -d 'Show archive details' +complete -c borg -f -n __fish_is_first_token -a 'mount' -d 'Mount archive or a repository' +complete -c borg -f -n __fish_is_first_token -a 'umount' -d 'Un-mount the mounted archive' + +function __fish_borg_seen_key + if __fish_seen_subcommand_from key + and not __fish_seen_subcommand_from import export change-passphrase + return 0 + end + return 1 +end +complete -c borg -f -n __fish_is_first_token -a 'key' -d 'Manage a repository key' +complete -c borg -f -n __fish_borg_seen_key -a 'import' -d 'Import a repository key' +complete -c borg -f -n __fish_borg_seen_key -a 'export' -d 'Export a repository key' +complete -c borg -f -n __fish_borg_seen_key -a 'change-passphrase' -d 'Change key file passphrase' + +complete -c borg -f -n __fish_is_first_token -a 'serve' -d 'Start in server mode' +complete -c borg -f -n __fish_is_first_token -a 'upgrade' -d 'Upgrade a repository' +complete -c borg -f -n __fish_is_first_token -a 'recreate' -d 'Recreate contents of existing archives' +complete -c borg -f -n 
__fish_is_first_token -a 'export-tar' -d 'Create tarball from an archive' +complete -c borg -f -n __fish_is_first_token -a 'with-lock' -d 'Run a command while repository lock held' +complete -c borg -f -n __fish_is_first_token -a 'break-lock' -d 'Break the repository lock' +complete -c borg -f -n __fish_is_first_token -a 'config' -d 'Get/set options in repo/cache config' + +function __fish_borg_seen_benchmark + if __fish_seen_subcommand_from benchmark + and not __fish_seen_subcommand_from crud + return 0 + end + return 1 +end +complete -c borg -f -n __fish_is_first_token -a 'benchmark' -d 'Benchmark borg operations' +complete -c borg -f -n __fish_borg_seen_benchmark -a 'crud' -d 'Benchmark borg CRUD operations' + +function __fish_borg_seen_help + if __fish_seen_subcommand_from help + and not __fish_seen_subcommand_from patterns placeholders compression + return 0 + end + return 1 +end +complete -c borg -f -n __fish_is_first_token -a 'help' -d 'Miscellaneous Help' +complete -c borg -f -n __fish_borg_seen_help -a 'patterns' -d 'Help for patterns' +complete -c borg -f -n __fish_borg_seen_help -a 'placeholders' -d 'Help for placeholders' +complete -c borg -f -n __fish_borg_seen_help -a 'compression' -d 'Help for compression' + +# Common options +complete -c borg -f -s h -l 'help' -d 'Show help information' +complete -c borg -f -l 'version' -d 'Show version information' +complete -c borg -f -l 'critical' -d 'Log level CRITICAL' +complete -c borg -f -l 'error' -d 'Log level ERROR' +complete -c borg -f -l 'warning' -d 'Log level WARNING (default)' +complete -c borg -f -l 'info' -d 'Log level INFO' +complete -c borg -f -s v -l 'verbose' -d 'Log level INFO' +complete -c borg -f -l 'debug' -d 'Log level DEBUG' +complete -c borg -f -l 'debug-topic' -d 'Enable TOPIC debugging' +complete -c borg -f -s p -l 'progress' -d 'Show progress information' +complete -c borg -f -l 'log-json' -d 'Output one JSON object per log line' +complete -c borg -f -l 'lock-wait' -d 'Wait for lock 
max N seconds [1]' +complete -c borg -f -l 'show-version' -d 'Log version information' +complete -c borg -f -l 'show-rc' -d 'Log the return code' +complete -c borg -f -l 'umask' -d 'Set umask to M [0077]' +complete -c borg -l 'remote-path' -d 'Use PATH as remote borg executable' +complete -c borg -f -l 'remote-ratelimit' -d 'Set remote network upload RATE limit' +complete -c borg -f -l 'consider-part-files' -d 'Treat part files like normal files' +complete -c borg -l 'debug-profile' -d 'Write execution profile into FILE' +complete -c borg -l 'rsh' -d 'Use COMMAND instead of ssh' + +# borg init options +set -l encryption_modes "none keyfile keyfile-blake2 repokey repokey-blake2 authenticated authenticated-blake2" +complete -c borg -f -s e -l 'encryption' -d 'Encryption key MODE' -a "$encryption_modes" -n "__fish_seen_subcommand_from init" +complete -c borg -f -l 'append-only' -d 'Create an append-only mode repository' -n "__fish_seen_subcommand_from init" +complete -c borg -f -l 'storage-quota' -d 'Set storage QUOTA of the repository' -n "__fish_seen_subcommand_from init" +complete -c borg -f -l 'make-parent-dirs' -d 'Create parent directories' -n "__fish_seen_subcommand_from init" + +# borg create options +complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from create" +complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'filter' -d 'Only items with given STATUSCHARS' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'json' -d 'Print verbose stats as json' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'no-cache-sync' -d 'Do not synchronize the cache' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'stdin-name' -d 'Use NAME in archive for stdin data' -n "__fish_seen_subcommand_from create" 
+complete -c borg -f -l 'stdin-user' -d 'Set user USER in archive for stdin data [root]' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'stdin-group' -d 'Set group GROUP in archive for stdin data [root]' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'stdin-mode' -d 'Set mode to M in archive for stdin data [0660]' -n "__fish_seen_subcommand_from create" + +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from create" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from create" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'exclude-caches' -d 'Exclude directories tagged as cache' -n "__fish_seen_subcommand_from create" +complete -c borg -l 'exclude-if-present' -d 'Exclude directories that contain FILENAME' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'keep-exclude-tags' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'keep-tag-files' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'exclude-nodump' -d 'Exclude files flagged NODUMP' -n "__fish_seen_subcommand_from create" +# Filesystem options +complete -c borg -f -s x -l 'one-file-system' -d 'Stay in the same file system' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'numeric-owner' -d 'Only store numeric user:group identifiers' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'noatime' -d 'Do not store atime' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'noctime' -d 'Do not store ctime' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 
'nobirthtime' -d 'Do not store creation date' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'nobsdflags' -d 'Do not store bsdflags' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'ignore-inode' -d 'Ignore inode data in file metadata cache' -n "__fish_seen_subcommand_from create" +set -l files_cache_mode "ctime,size,inode mtime,size,inode ctime,size mtime,size rechunk,ctime rechunk,mtime disabled" +complete -c borg -f -l 'files-cache' -d 'Operate files cache in MODE' -a "$files_cache_mode" -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'read-special' -d 'Open device files like regular files' -n "__fish_seen_subcommand_from create" +# Archive options +complete -c borg -f -l 'comment' -d 'Add COMMENT to the archive' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'timestamp' -d 'Set creation TIME (yyyy-mm-ddThh:mm:ss)' -n "__fish_seen_subcommand_from create" +complete -c borg -l 'timestamp' -d 'Set creation time by reference FILE' -n "__fish_seen_subcommand_from create" +complete -c borg -f -s c -l 'checkpoint-interval' -d 'Write checkpoint every N seconds [1800]' -n "__fish_seen_subcommand_from create" +complete -c borg -f -l 'chunker-params' -d 'Chunker PARAMETERS [19,23,21,4095]' -n "__fish_seen_subcommand_from create" +set -l compression_methods "none auto lz4 zstd,1 zstd,2 zstd,3 zstd,4 zstd,5 zstd,6 zstd,7 zstd,8 zstd,9 zstd,10 zstd,11 zstd,12 zstd,13 zstd,14 zstd,15 zstd,16 zstd,17 zstd,18 zstd,19 zstd,20 zstd,21 zstd,22 zlib,1 zlib,2 zlib,3 zlib,4 zlib,5 zlib,6 zlib,7 zlib,8 zlib,9 lzma,0 lzma,1 lzma,2 lzma,3 lzma,4 lzma,5 lzma,6 lzma,7 lzma,8 lzma,9" +complete -c borg -f -s C -l 'compression' -d 'Select compression ALGORITHM,LEVEL [lz4]' -a "$compression_methods" -n "__fish_seen_subcommand_from create" + +# borg extract options +complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from extract" +complete -c borg -f -s n -l 'dry-run' -d 'Do not actually 
extract any files' -n "__fish_seen_subcommand_from extract" +complete -c borg -f -l 'numeric-owner' -d 'Only obey numeric user:group identifiers' -n "__fish_seen_subcommand_from extract" +complete -c borg -f -l 'nobsdflags' -d 'Do not extract/set bsdflags' -n "__fish_seen_subcommand_from extract" +complete -c borg -f -l 'stdout' -d 'Write all extracted data to stdout' -n "__fish_seen_subcommand_from extract" +complete -c borg -f -l 'sparse' -d 'Create holes in output sparse file' -n "__fish_seen_subcommand_from extract" +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from extract" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from extract" +complete -c borg -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from extract" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from extract" +complete -c borg -f -l 'strip-components' -d 'Remove NUMBER of leading path elements' -n "__fish_seen_subcommand_from extract" + +# borg check options +complete -c borg -f -l 'repository-only' -d 'Only perform repository checks' -n "__fish_seen_subcommand_from check" +complete -c borg -f -l 'archives-only' -d 'Only perform archives checks' -n "__fish_seen_subcommand_from check" +complete -c borg -f -l 'verify-data' -d 'Cryptographic integrity verification' -n "__fish_seen_subcommand_from check" +complete -c borg -f -l 'repair' -d 'Attempt to repair found inconsistencies' -n "__fish_seen_subcommand_from check" +complete -c borg -f -l 'save-space' -d 'Work slower but using less space' -n "__fish_seen_subcommand_from check" +# Archive filters +complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from check" +complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n 
"__fish_seen_subcommand_from check" +set -l sort_keys "timestamp name id" +complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from check" +complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from check" +complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from check" + +# borg rename +# no specific options + +# borg list options +complete -c borg -f -l 'short' -d 'Only print file/directory names' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'list-format' -d 'Specify FORMAT for file listing' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'format' -d 'Specify FORMAT for file listing' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'json' -d 'List contents in json format' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'json-lines' -d 'List contents in json lines format' -n "__fish_seen_subcommand_from list" +# Archive filters +complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from list" +complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from list" +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from list" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from list" +complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from list" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from 
PATTERNFILE' -n "__fish_seen_subcommand_from list" + +# borg diff options +complete -c borg -f -l 'numeric-owner' -d 'Only consider numeric user:group' -n "__fish_seen_subcommand_from diff" +complete -c borg -f -l 'same-chunker-params' -d 'Override check of chunker parameters' -n "__fish_seen_subcommand_from diff" +complete -c borg -f -l 'sort' -d 'Sort the output lines by file path' -n "__fish_seen_subcommand_from diff" +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from diff" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from diff" +complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from diff" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from diff" + +# borg delete options +complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from delete" +complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from delete" +complete -c borg -f -l 'cache-only' -d "Delete only the local cache" -n "__fish_seen_subcommand_from delete" +complete -c borg -f -l 'force' -d 'Force deletion of corrupted archives' -n "__fish_seen_subcommand_from delete" +complete -c borg -f -l 'save-space' -d 'Work slower but using less space' -n "__fish_seen_subcommand_from delete" +# Archive filters +complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from delete" +complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from delete" +complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from delete" +complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from delete" +complete -c borg -f 
-l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from delete" + +# borg prune options +complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'force' -d 'Force pruning of corrupted archives' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'keep-within' -d 'Keep archives within time INTERVAL' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'keep-last' -d 'NUMBER of secondly archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'keep-secondly' -d 'NUMBER of secondly archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'keep-minutely' -d 'NUMBER of minutely archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s H -l 'keep-hourly' -d 'NUMBER of hourly archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s d -l 'keep-daily' -d 'NUMBER of daily archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s w -l 'keep-weekly' -d 'NUMBER of weekly archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s m -l 'keep-monthly' -d 'NUMBER of monthly archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s y -l 'keep-yearly' -d 'NUMBER of yearly archives to keep' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -l 'save-space' -d 'Work slower but using less space' -n "__fish_seen_subcommand_from prune" +# Archive filters +complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from prune" +complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from prune" + +# borg 
info options +complete -c borg -f -l 'json' -d 'Format output in json format' -n "__fish_seen_subcommand_from info" +# Archive filters +complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from info" +complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from info" +complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from info" +complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from info" +complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from info" + +# borg mount options +complete -c borg -f -s f -l 'foreground' -d 'Stay in foreground, do not daemonize' -n "__fish_seen_subcommand_from mount" +# FIXME This list is probably not full, but I tried to pick only those that are relevant to borg mount -o: +set -l fuse_options "ac_attr_timeout= allow_damaged_files allow_other allow_root attr_timeout= auto auto_cache auto_unmount default_permissions entry_timeout= gid= group_id= kernel_cache max_read= negative_timeout= noauto noforget remember= remount rootmode= uid= umask= user user_id= versions" +complete -c borg -f -s o -d 'Fuse mount OPTION' -a "$fuse_options" -n "__fish_seen_subcommand_from mount" +# Archive filters +complete -c borg -f -s P -l 'prefix' -d 'Only archive names starting with PREFIX' -n "__fish_seen_subcommand_from mount" +complete -c borg -f -s a -l 'glob-archives' -d 'Only archive names matching GLOB' -n "__fish_seen_subcommand_from mount" +complete -c borg -f -l 'sort-by' -d 'Sorting KEYS [timestamp]' -a "$sort_keys" -n "__fish_seen_subcommand_from mount" +complete -c borg -f -l 'first' -d 'Only first N archives' -n "__fish_seen_subcommand_from mount" +complete -c borg -f -l 'last' -d 'Only last N archives' -n "__fish_seen_subcommand_from mount" +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths 
matching PATTERN' -n "__fish_seen_subcommand_from mount" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from mount" +complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from mount" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from mount" +complete -c borg -f -l 'strip-components' -d 'Remove NUMBER of leading path elements' -n "__fish_seen_subcommand_from mount" + +# borg umount +# no specific options + +# borg key change-passphrase +# no specific options + +# borg key export +complete -c borg -f -l 'paper' -d 'Create an export for printing' -n "__fish_seen_subcommand_from export" +complete -c borg -f -l 'qr-html' -d 'Create an html file for printing and qr' -n "__fish_seen_subcommand_from export" + +# borg key import +complete -c borg -f -l 'paper' -d 'Import from a backup done with --paper' -n "__fish_seen_subcommand_from import" + +# borg upgrade +complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from upgrade" +complete -c borg -f -l 'inplace' -d 'Rewrite repository in place' -n "__fish_seen_subcommand_from upgrade" +complete -c borg -f -l 'force' -d 'Force upgrade' -n "__fish_seen_subcommand_from upgrade" +complete -c borg -f -l 'tam' -d 'Enable manifest authentication' -n "__fish_seen_subcommand_from upgrade" +complete -c borg -f -l 'disable-tam' -d 'Disable manifest authentication' -n "__fish_seen_subcommand_from upgrade" + +# borg recreate +complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'filter' -d 'Only items with given STATUSCHARS' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -s n -l 'dry-run' -d 'Do not change the repository' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -s s -l 'stats' -d 'Print verbose statistics' -n 
"__fish_seen_subcommand_from recreate" +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from recreate" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from recreate" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'exclude-caches' -d 'Exclude directories tagged as cache' -n "__fish_seen_subcommand_from recreate" +complete -c borg -l 'exclude-if-present' -d 'Exclude directories that contain FILENAME' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'keep-exclude-tags' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'keep-tag-files' -d 'Keep tag files of excluded directories' -n "__fish_seen_subcommand_from recreate" +# Archive options +complete -c borg -f -l 'target' -d "Create a new ARCHIVE" -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -s c -l 'checkpoint-interval' -d 'Write checkpoint every N seconds [1800]' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'comment' -d 'Add COMMENT to the archive' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -l 'timestamp' -d 'Set creation TIME (yyyy-mm-ddThh:mm:ss)' -n "__fish_seen_subcommand_from recreate" +complete -c borg -l 'timestamp' -d 'Set creation time using reference FILE' -n "__fish_seen_subcommand_from recreate" +complete -c borg -f -s C -l 'compression' -d 'Select compression ALGORITHM,LEVEL [lz4]' -a "$compression_methods" -n "__fish_seen_subcommand_from recreate" +set -l recompress_when "if-different always never" +complete -c borg -f -l 'recompress' -d 'Recompress chunks CONDITION' -a "$recompress_when" -n "__fish_seen_subcommand_from 
recreate" +complete -c borg -f -l 'chunker-params' -d 'Chunker PARAMETERS [19,23,21,4095]' -n "__fish_seen_subcommand_from recreate" + +# borg export-tar options +complete -c borg -l 'tar-filter' -d 'Filter program to pipe data through' -n "__fish_seen_subcommand_from export-tar" +complete -c borg -f -l 'list' -d 'Print verbose list of items' -n "__fish_seen_subcommand_from export-tar" +# Exclusion options +complete -c borg -s e -l 'exclude' -d 'Exclude paths matching PATTERN' -n "__fish_seen_subcommand_from export-tar" +complete -c borg -l 'exclude-from' -d 'Read exclude patterns from EXCLUDEFILE' -n "__fish_seen_subcommand_from export-tar" +complete -c borg -f -l 'pattern' -d 'Include/exclude paths matching PATTERN' -n "__fish_seen_subcommand_from export-tar" +complete -c borg -l 'patterns-from' -d 'Include/exclude paths from PATTERNFILE' -n "__fish_seen_subcommand_from export-tar" +complete -c borg -f -l 'strip-components' -d 'Remove NUMBER of leading path elements' -n "__fish_seen_subcommand_from export-tar" + +# borg serve +complete -c borg -l 'restrict-to-path' -d 'Restrict repository access to PATH' -n "__fish_seen_subcommand_from serve" +complete -c borg -l 'restrict-to-repository' -d 'Restrict repository access at PATH' -n "__fish_seen_subcommand_from serve" +complete -c borg -f -l 'append-only' -d 'Only allow appending to repository' -n "__fish_seen_subcommand_from serve" +complete -c borg -f -l 'storage-quota' -d 'Override storage QUOTA of the repository' -n "__fish_seen_subcommand_from serve" + +# borg config +complete -c borg -f -s c -l 'cache' -d 'Get/set/list values in the repo cache' -n "__fish_seen_subcommand_from config" +complete -c borg -f -s d -l 'delete' -d 'Delete the KEY from the config' -n "__fish_seen_subcommand_from config" +complete -c borg -f -l 'list' -d 'List the configuration of the repo' -n "__fish_seen_subcommand_from config" + +# borg with-lock +# no specific options + +# borg break-lock +# no specific options + +# borg benchmark +# no 
specific options + +# borg help +# no specific options + + +# List repositories::archives + +function __fish_borg_is_argument_n --description 'Test if current argument is on Nth place' --argument n + set tokens (commandline --current-process --tokenize --cut-at-cursor) + set -l tokencount 0 + for token in $tokens + switch $token + case '-*' + # ignore command line switches + case '*' + set tokencount (math $tokencount+1) + end + end + return (test $tokencount -eq $n) +end + +function __fish_borg_is_dir_a_repository + set -l config_content + if test -f $argv[1]/README + and test -f $argv[1]/config + read config_content < $argv[1]/config ^/dev/null + end + return (string match --quiet '[repository]' $config_content) +end + +function __fish_borg_list_repos_or_archives + if string match --quiet --regex '.*::' '"'(commandline --current-token)'"' + # If the current token contains "::" then list the archives: + set -l repository_name (string replace --regex '::.*' '' (commandline --current-token)) + borg list --format="$repository_name::{archive}{TAB}{comment}{NEWLINE}" "$repository_name" ^/dev/null + else + # Otherwise list the repositories, directories and user@host entries: + set -l directories (commandline --cut-at-cursor --current-token)*/ + for directoryname in $directories + if __fish_borg_is_dir_a_repository $directoryname + printf '%s::\t%s\n' (string trim --right --chars='/' $directoryname) "Repository" + else + printf '%s\n' $directoryname + end + end + __fish_complete_user_at_hosts | string replace --regex '$' ':' + end +end + +complete -c borg -f -n "__fish_borg_is_argument_n 2" -a '(__fish_borg_list_repos_or_archives)' + + +# Additional archive listings + +function __fish_borg_is_diff_second_archive + return (string match --quiet --regex ' diff .*::[^ ]+ '(commandline --current-token)'$' (commandline)) +end + +function __fish_borg_is_delete_additional_archive + return (string match --quiet --regex ' delete .*::[^ ]+ ' (commandline)) +end + +function 
__fish_borg_list_only_archives + set -l repo_matches (string match --regex '([^ ]*)::' (commandline)) + borg list --format="{archive}{TAB}{comment}{NEWLINE}" "$repo_matches[2]" ^/dev/null +end + +complete -c borg -f -n __fish_borg_is_diff_second_archive -a '(__fish_borg_list_only_archives)' +complete -c borg -f -n __fish_borg_is_delete_additional_archive -a '(__fish_borg_list_only_archives)' diff --git a/scripts/shell_completions/zsh/_borg b/scripts/shell_completions/zsh/_borg new file mode 100644 index 0000000000..28e48a87ac --- /dev/null +++ b/scripts/shell_completions/zsh/_borg @@ -0,0 +1,1607 @@ +#compdef borg borgfs -P -value-,BORG_*,-default- + +# Zsh completion for Borg Backup 1.1.15. +# +# Recommended _borg specific settings: +# +# zstyle -e ':completion:*:*:borg-*:argument-rest:*' tag-order \ +# '[[ $words[CURRENT] == -* ]] && reply=( "! archives archive-files" "-" )' +# zstyle ':completion:*:*:(borg|-value-,BORG_)*' sort false +# zstyle ':completion:*:*:borg-config:argument-2:keys' list-grouped false +# zstyle ':completion:*:*:borg-*:*' gain-privileges true +# zstyle ':completion:*' fake-parameters 'BORG_REPO:scalar' +# +# Custom styles: +# +# archive-description-format +# Default: `{archive:<36} {time} [{id}]`. +# archive-sort +# In which order archive names should be listed. +# Possible values are: `inverse`, `timestamp`, `name`, `id`. +# file-description-format +# Default: `{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}`. +# path-style-selector +# Style selector used to select a path (when Borg would use either `pp` or `pf`). +# Default: `fm`. +# repository-suffix +# This boolean style controls whether to add the `::` auto-removable suffix to a repository. +# Default: `true`. 
+ +(( $+functions[_borg_commands] )) || +_borg_commands() { + local -a commands_=( + 'benchmark:benchmark command' + 'break-lock:break repository and cache locks' + 'change-passphrase:change repository passphrase' + 'check:verify repository' + 'config:get and set configuration values' + 'create:create backup' + 'debug:debugging command (not intended for normal use)' + 'delete:delete archive' + 'diff:find differences in archive contents' + 'export-tar:create tarball from archive' + 'extract:extract archive contents' + 'help:extra help' + 'info:show repository or archive information' + 'init:initialize empty repository' + 'key:manage repository key' + 'list:list archive or repository contents' + 'mount:mount repository' + 'prune:prune archives' + 'recreate:re-create archives' + 'rename:rename archive' + 'serve:start repository server process' + 'umount:umount repository' + 'upgrade:upgrade repository format' + 'with-lock:run user command with lock held' + ) + _describe -t commands 'borg commands' commands_ +} + +(( $+functions[_borg-benchmark] )) || +_borg-benchmark() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + $common_options \ + ':type:(crud)' \ + ': :_borg_repository' \ + ':PATH:_files' +} + +(( $+functions[_borg-break-lock] )) || +_borg-break-lock() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + $common_options \ + ':: :_borg_repository' +} + +(( $+functions[_borg-change-passphrase] )) || +_borg-change-passphrase() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + $common_options \ + ':: :_borg_repository' +} + +(( $+functions[_borg-check] )) || +_borg-check() { + local -a common_options common_archive_filters_options + __borg_setup_common_options + __borg_setup_common_archive_filters_options + + _arguments -s -w -S : \ + '--repository-only[only perform repository checks]' \ + '--archives-only[only perform archives checks]' \ + 
'(--repository-only)--verify-data[perform cryptographic archive data integrity verification]' \ + '--repair[attempt to repair any inconsistencies found]' \ + '--save-space[work slower, but using less space]' \ + $common_archive_filters_options \ + $common_options \ + '::REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' +} + +(( $+functions[_borg-config] )) || +_borg-config() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + '(-c --cache)'{-c,--cache}'[get and set values from the repo cache]' \ + '(-d --delete)'{-d,--delete}'[delete the key from the config]' \ + '(-l --list)'{-l,--list}'[list the configuration of the repo]' \ + $common_options \ + ': :_borg_repository' \ + ': : _borg_config $line[1]' \ + '::VALUE' +} + +(( $+functions[_borg-create] )) || +_borg-create() { + local -a common_options common_create_options + __borg_setup_common_options + __borg_setup_common_create_options + + _arguments -s -w -S : \ + '*'{-e,--exclude}'=[exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files -f -e "$line[1]" fm "${(@)line[2,-1]}"' \ + '*--pattern=[experimental: include/exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files -p -f -e "$line[1]" sh "${(@)line[2,-1]}"' \ + $common_create_options \ + '(-s --stats)--json[Output stats as JSON. Implies --stats.]' \ + '--no-cache-sync[experimental: do not synchronize the cache. 
Implies not using the files cache.]' \ + '--no-files-cache[do not load/update the file metadata cache used to detect unchanged files]' \ + '--stdin-name=[use NAME in archive for stdin data (default: "stdin")]:NAME' \ + '--stdin-user=[set user USER in archive for stdin data (default: root)]:USER:_users' \ + '--stdin-group=[set group GROUP in archive for stdin data (default: root)]:GROUP:_groups' \ + '--stdin-mode=[set mode to M in archive for stdin data (default: 0660)]: : _borg_guard_numeric_mode "M"' \ + '--exclude-nodump[exclude files flagged NODUMP]' \ + '(-x --one-file-system)'{-x,--one-file-system}'[stay in the same file system]' \ + '--numeric-owner[only store numeric user and group identifiers]' \ + '--noatime[do not store atime into archive]' \ + '--noctime[do not store ctime into archive]' \ + '--nobirthtime[do not store birthtime (creation date) into archive]' \ + '--nobsdflags[do not read and store bsdflags (e.g. NODUMP, IMMUTABLE) into archive]' \ + '--ignore-inode[ignore inode data in the file metadata cache used to detect unchanged files]' \ + '--files-cache=[operate files cache in MODE. 
default: ctime,size,inode]:MODE:(ctime,size,inode mtime,size,inode ctime,size mtime,size rechunk,ctime rechunk,mtime disabled)' \ + '--read-special[open and read block and char device files as well as FIFOs as if they were regular files]' \ + $common_options \ + ':ARCHIVE: _borg_repository_or_archive -a -p' \ + '*:PATH:_files' +} + +(( $+functions[_borg-debug] )) || +_borg-debug() { + local -a state line common_options + local curcontext="$curcontext" state_descr + declare -A opt_args + local -i ret=1 + __borg_setup_common_options + + _arguments -s -w -C : \ + $common_options \ + ': :->command' \ + '*:: :->option-or-argument' && ret=0 + + case $state in + (command) + local -a debug_commands=( + 'info:show system infos for debugging / bug reports' + 'dump-archive-items:dump archive items (metadata)' + 'dump-archive:dump decoded archive metadata' + 'dump-manifest:dump decoded repository metadata' + 'dump-repo-objs:dump repo objects' + 'search-repo-objs:search repo objects' + 'get-obj:get object from repository' + 'put-obj:put object to repository' + 'delete-obj:delete object from repository' + 'refcount-obj:show refcount for object from repository' + 'convert-profile:convert Borg profile to Python profile' + ) + _describe -t commands 'command' debug_commands && ret=0 + ;; + (option-or-argument) + curcontext="${curcontext%:*}-$line[1]:" + + case $line[1] in + (info) + _arguments -s -w -S : \ + $common_options && ret=0 + ;; + (dump-archive-items) + _arguments -s -w -S : \ + $common_options \ + ':ARCHIVE: _borg_repository_or_archive -a' && ret=0 + ;; + (dump-archive) + _arguments -s -w -S : \ + $common_options \ + ':ARCHIVE: _borg_repository_or_archive -a' \ + ':PATH:_files' && ret=0 + ;; + (dump-manifest) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' \ + ':PATH:_files' && ret=0 + ;; + (dump-repo-objs) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' && ret=0 + ;; + (search-repo-objs) + _arguments -s -w -S : \ + 
$common_options \ + ': :_borg_repository' \ + ':WANTED (hex or string):' && ret=0 + ;; + (get-obj) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' \ + ':ID (hex object):' \ + ':PATH:_files' && ret=0 + ;; + (put-obj) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' \ + '*:PATH:_files' && ret=0 + ;; + (delete-obj) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' \ + '*:ID (hex object):' && ret=0 + ;; + (refcount-obj) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' \ + '*:ID (hex object):' && ret=0 + ;; + (convert-profile) + _arguments -s -w -S : \ + $common_options \ + ':INPUT:_files' \ + ':OUTPUT:_files' && ret=0 + ;; + (*) + if ! _call_function ret _borg_debug_$line[1]; then + _default && ret=0 + fi + ;; + esac + ;; + esac + + return ret +} + +(( $+functions[_borg-delete] )) || +_borg-delete() { + local -a common_options common_archive_filters_options common_dry_run_stats_options + __borg_setup_common_options + __borg_setup_common_archive_filters_options + __borg_setup_common_dry_run_stats_options + + _arguments -s -w -S : \ + $common_dry_run_stats_options \ + '--cache-only[delete only the local cache for the given repository]' \ + '*--force[force deletion of corrupted archives, use "--force --force" in case "--force" does not work]' \ + '--save-space[work slower, but using less space]' \ + $common_archive_filters_options \ + $common_options \ + ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ + '*:ARCHIVE: _borg_archive "${line[1]%%::*}"' +} + +(( $+functions[_borg-diff] )) || +_borg-diff() { + local -a common_options common_exclude_options + __borg_setup_common_options + __borg_setup_common_exclude_options + + _arguments -s -w -S : \ + '--numeric-owner[only obey numeric user and group identifiers]' \ + '--same-chunker-params[override check of chunker parameters]' \ + '--sort[sort the output lines by file path]' \ + $common_exclude_options \ + $common_options \ + 
':ARCHIVE1: _borg_repository_or_archive -a' \ + ':ARCHIVE2: _borg_archive "${line[1]%%::*}"' \ + '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' +} + +(( $+functions[_borg-export-tar] )) || +_borg-export-tar() { + local -a common_options common_exclude_extract_options + __borg_setup_common_options + __borg_setup_common_exclude_extract_options + + _arguments -s -w -S : \ + '--tar-filter[filter program to pipe data through]: :_cmdstring' \ + '--list[output verbose list of items (files, dirs, ...)]' \ + $common_exclude_extract_options \ + $common_options \ + ':ARCHIVE: _borg_repository_or_archive -a' \ + ':FILE:_files' \ + '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' +} + +(( $+functions[_borg-extract] )) || +_borg-extract() { + local -a common_options common_exclude_extract_options + __borg_setup_common_options + __borg_setup_common_exclude_extract_options + + _arguments -s -w -S : \ + '--list[output verbose list of items (files, dirs, ...)]' \ + '(-n --dry-run)'{-n,--dry-run}'[do not actually change any files]' \ + '--numeric-owner[only obey numeric user and group identifiers]' \ + '--nobsdflags[do not extract/set bsdflags (e.g. 
NODUMP, IMMUTABLE)]' \ + '--stdout[write all extracted data to stdout]' \ + '--sparse[create holes in output sparse file from all-zero chunks]' \ + $common_exclude_extract_options \ + $common_options \ + ':ARCHIVE: _borg_repository_or_archive -a' \ + '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' +} + +(( $+functions[_borg-help] )) || +_borg-help() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + '--epilog-only' \ + '--usage-only' \ + $common_options \ + ':: : _alternative "topics:TOPIC:(patterns placeholders compression)" ": :_borg_commands"' +} + +(( $+functions[_borg-info] )) || +_borg-info() { + local -a common_options common_archive_filters_options + __borg_setup_common_options + __borg_setup_common_archive_filters_options + + _arguments -s -w -S : \ + '--json[format output as JSON]' \ + $common_archive_filters_options \ + $common_options \ + '::REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' +} + +(( $+functions[_borg-init] )) || +_borg-init() { + local -i ret=1 + local -a common_options common_init_options + __borg_setup_common_options + __borg_setup_common_init_options + + # special handling for the required optional argument + if (( ! 
${words[(I)(-e|--encryption)(|=*)]} )); then + local desc='select encryption key mode' + local -a long=( "--encryption:$desc" ) short=( "-e:$desc" ) remove_chars=( -r '= \t\n\-' ) + _describe -t required-options 'required option' long -S '=' $remove_chars -- short $remove_chars && ret=0 + fi + + _arguments -s -w -S : \ + '(-e --encryption)'{-e,--encryption}'=[select encryption key mode (required)]:MODE:(none keyfile keyfile-blake2 repokey repokey-blake2 authenticated authenticated-blake2)' \ + $common_init_options \ + '--make-parent-dirs[create parent directories]' \ + '::REPOSITORY:_directories' && ret=0 + + return ret +} + +(( $+functions[_borg-key] )) || +_borg-key() { + local -a state line common_options + local curcontext="$curcontext" state_descr + declare -A opt_args + local -i ret=1 + __borg_setup_common_options + + _arguments -s -w -C : \ + $common_options \ + ': :->command' \ + '*:: :->option-or-argument' && ret=0 + + case $state in + (command) + local -a key_commands=( + 'change-passphrase:Change repository key file passphrase' + 'export:Export the repository key for backup' + 'import:Import the repository key from backup' + 'migrate-to-repokey:Migrate passphrase -> repokey' + ) + _describe -t commands 'command' key_commands && ret=0 + ;; + (option-or-argument) + curcontext="${curcontext%:*}-$line[1]:" + + case $line[1] in + (change-passphrase) + _arguments -s -w -S : \ + $common_options \ + ': :_borg_repository' && ret=0 + ;; + (export) + _arguments -s -w -S : \ + '--paper[create an export suitable for printing and later type-in]' \ + '--qr-html[create an html file suitable for printing and later type-in or qr scan]' \ + $common_options \ + ': :_borg_repository' \ + '::PATH:_files' && ret=0 + ;; + (import) + _arguments -s -w -S : \ + '--paper[interactively import from a backup done with --paper]' \ + $common_options \ + ': :_borg_repository' \ + '::PATH:_files' && ret=0 + ;; + (migrate-to-repokey) + _arguments -s -w -S : \ + $common_options \ + ':: 
:_borg_repository' && ret=0 + ;; + (*) + if ! _call_function ret _borg_key_$line[1]; then + _default && ret=0 + fi + ;; + esac + ;; + esac + + return ret +} + +(( $+functions[_borg-list] )) || +_borg-list() { + local -a common_options common_exclude_options common_archive_filters_options + __borg_setup_common_options + __borg_setup_common_exclude_options + __borg_setup_common_archive_filters_options + + _arguments -s -w -S : \ + '--short[only print file/directory names, nothing else]' \ + {--format,--list-format}'=[specify format for file listing]:FORMAT: _borg_format_keys $line[1]' \ + '--json[Only valid for listing repository contents. Format output as JSON.]' \ + '--json-lines[Only valid for listing archive contents. Format output as JSON Lines.]' \ + $common_archive_filters_options \ + $common_exclude_options \ + $common_options \ + ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ + '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' +} + +(( $+functions[_borg-mount] )) || +_borg-mount() { + local -a common_options common_exclude_extract_options common_archive_filters_options + __borg_setup_common_options + __borg_setup_common_exclude_extract_options + __borg_setup_common_archive_filters_options + + _arguments -s -w -S : \ + $* \ + '(-f --foreground)'{-f,--foreground}'[stay in foreground, do not daemonize]' \ + '-o[mount options]: :_fuse_values "mount options" + "versions[merged, versioned view of the files in the archives]" + "allow_damaged_files[read damaged files]" + "ignore_permissions[not enforce \"default_permissions\"]"' \ + $common_archive_filters_options \ + $common_exclude_extract_options \ + $common_options \ + ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ + ':MOUNTPOINT:_directories' \ + '*: : _borg_style_selector_or_archive_files "$line[1]" pp' +} + +(( $+functions[_borg-prune] )) || +_borg-prune() { + local -a common_options common_prefix_and_glob_archives_filter_options common_dry_run_stats_options + 
__borg_setup_common_options + __borg_setup_common_prefix_and_glob_archives_filter_options + __borg_setup_common_dry_run_stats_options + + _arguments -s -w -S : \ + $common_dry_run_stats_options \ + '*--force[force pruning of corrupted archives, use "--force --force" in case "--force" does not work]' \ + '--list[output verbose list of archives it keeps/prunes]' \ + '--keep-within[keep all archives within this time interval]: : _borg_guard_unsigned_number "INTERVAL"' \ + '(--keep-last --keep-secondly)'{--keep-last,--keep-secondly}'[number of secondly archives to keep]: : _borg_guard_unsigned_number "N"' \ + '--keep-minutely[number of minutely archives to keep]: : _borg_guard_unsigned_number "N"' \ + '(-H --keep-hourly)'{-H,--keep-hourly}'[number of hourly archives to keep]: : _borg_guard_unsigned_number "N"' \ + '(-d --keep-daily)'{-d,--keep-daily}'[number of daily archives to keep]: : _borg_guard_unsigned_number "N"' \ + '(-w --keep-weekly)'{-w,--keep-weekly}'[number of weekly archives to keep]: : _borg_guard_unsigned_number "N"' \ + '(-m --keep-monthly)'{-m,--keep-monthly}'[number of monthly archives to keep]: : _borg_guard_unsigned_number "N"' \ + '(-y --keep-yearly)'{-y,--keep-yearly}'[number of yearly archives to keep]: : _borg_guard_unsigned_number "N"' \ + '--save-space[work slower, but using less space]' \ + $common_prefix_and_glob_archives_filter_options \ + $common_options \ + ':: :_borg_repository' +} + +(( $+functions[_borg-recreate] )) || +_borg-recreate() { + local -a common_options common_create_options + __borg_setup_common_options + __borg_setup_common_create_options + + local -a mods=( + 'if-different:recompress if current compression is with a different compression algorithm (the level is not considered)' + 'always:recompress even if current compression is with the same compression algorithm (use this to change the compression level)' + 'never:do not recompress (use this option to explicitly prevent recompression)' + ) + mods=( 
${${(q)mods//\\/\\\\}//:/\\:} ) + + _arguments -s -w -S : \ + $common_create_options \ + '--target=[create a new archive with the name ARCHIVE]:ARCHIVE: _borg_placeholder_or_archive "${line[1]%%\:\:*}"' \ + '--recompress=[recompress data chunks according to "MODE" and "--compression"]:MODE:'"(($mods))" \ + $common_options \ + ':REPOSITORY_OR_ARCHIVE: _borg_repository_or_archive' \ + '*: : _borg_style_selector_or_archive_files -e "$line[1]" pp' +} + +(( $+functions[_borg-rename] )) || +_borg-rename() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + $common_options \ + ':ARCHIVE: _borg_repository_or_archive -a' \ + ':NEWNAME' +} + +(( $+functions[_borg-serve] )) || +_borg-serve() { + local -a common_options common_init_options + __borg_setup_common_options + __borg_setup_common_init_options + + _arguments -s -w -S : \ + $common_init_options \ + '*--restrict-to-path=[restrict repository access to PATH]:PATH:_files' \ + '*--restrict-to-repository=[restrict repository access]: :_borg_repository' +} + +(( $+functions[_borg-umount] )) || +_borg-umount() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + $common_options \ + ':MOUNTPOINT:_umountable' +} + +(( $+functions[_borg-upgrade] )) || +_borg-upgrade() { + local -a common_options + __borg_setup_common_options + + _arguments -s -w -S : \ + '(-n --dry-run)'{-n,--dry-run}'[do not change repository]' \ + '--inplace[rewrite repository in place, with no chance of going back to older versions of the repository]' \ + '--force[force upgrade]' \ + '--tam[enable manifest authentication (in key and cache)]' \ + '--disable-tam[disable manifest authentication (in key and cache)]' \ + $common_options \ + ':: :_borg_repository' +} + +(( $+functions[_borg-with-lock] )) || +_borg-with-lock() { + local -a state line common_options + local curcontext="$curcontext" state_descr + declare -A opt_args + local -i ret=1 + __borg_setup_common_options + + _arguments -s 
-w -C -S : \ + $common_options \ + '(-): :_borg_repository' \ + '(-):COMMAND: _command_names -e' \ + '(-)*:ARGS:->normal' && ret=0 + + case $state in + (normal) + shift 2 words + (( CURRENT -= 2 )) + _normal && ret=0 + ;; + esac + + return ret +} + +(( $+functions[__borg_setup_common_options] )) || +__borg_setup_common_options() { + typeset -ga common_options=( + '(- :)'{-h,--help}'[show this help message and exit]' + '--critical[work on log level CRITICAL]' + '--error[work on log level ERROR]' + '--warning[work on log level WARNING (default)]' + '(--info -v --verbose)'{--info,-v,--verbose}'[work on log level INFO]' + '--debug[work on log level DEBUG]' + '--debug-topic=[enable TOPIC debugging (can be specified multiple times)]:TOPIC' + '(-p --progress)'{-p,--progress}'[show progress information]' + '--log-json[Output one JSON object per log line instead of formatted text.]' + '--lock-wait=[wait at most SECONDS for acquiring a repository/cache lock (default: 1)]: : _borg_guard_unsigned_number "SECONDS"' + '--bypass-lock[bypass locking mechanism]' + '(- :)--show-version[show/log the borg version]' + '--show-rc[show/log the return code (rc)]' + '--umask=[set umask to M (local and remote, default: 0077)]: : _borg_guard_numeric_mode "M"' + '--remote-path=[set remote path to executable (default: "borg")]: :_cmdstring' + '--remote-ratelimit=[set remote network upload rate limit in kiByte/s (default: 0=unlimited)]: : _borg_guard_unsigned_number "RATE"' + '--consider-part-files[treat part files like normal files (e.g. 
to list/extract them)]' + '--debug-profile=[write execution profile in Borg format into FILE]:FILE:_files' + '--rsh=[use COMMAND instead of ssh]: :_cmdstring' + ) +} + +(( $+functions[__borg_setup_common_exclude_options] )) || +__borg_setup_common_exclude_options() { + typeset -ga common_exclude_options=( + '*'{-e,--exclude}'=[exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files "$line[1]" fm' + '*--exclude-from=[read exclude patterns from EXCLUDEFILE, one per line]:EXCLUDEFILE:_files' + '*--pattern=[experimental: include/exclude paths matching PATTERN]: : _borg_style_selector_or_archive_files -p "$line[1]" sh' + '*--patterns-from=[experimental: read include/exclude patterns from PATTERNFILE, one per line]:PATTERNFILE:_files' + ) +} + +(( $+functions[__borg_setup_common_exclude_extract_options] )) || +__borg_setup_common_exclude_extract_options() { + local -a common_exclude_options + __borg_setup_common_exclude_options + typeset -ga common_exclude_extract_options=( + $common_exclude_options + '--strip-components=[Remove the specified number of leading path elements. 
Paths with fewer elements will be silently skipped.]: : _borg_guard_unsigned_number "NUMBER"' + ) +} + +(( $+functions[__borg_setup_common_prefix_and_glob_archives_filter_options] )) || +__borg_setup_common_prefix_and_glob_archives_filter_options() { + typeset -ga common_prefix_and_glob_archives_filter_options=( + '(-P --prefix -a --glob-archives)'{-P,--prefix}'=[only consider archive names starting with this prefix]:PREFIX: _borg_archive -n "${line[1]%%\:\:*}"' + '(-P --prefix)*'{-a,--glob-archives}'=[only consider archive names matching the glob]:GLOB: _borg_archive -n "${line[1]%%\:\:*}"' + ) +} + +(( $+functions[__borg_setup_common_archive_filters_options] )) || +__borg_setup_common_archive_filters_options() { + local -a common_prefix_and_glob_archives_filter_options + __borg_setup_common_prefix_and_glob_archives_filter_options + typeset -ga common_archive_filters_options=( + $common_prefix_and_glob_archives_filter_options + '--sort-by=[Comma-separated list of sorting keys, default: timestamp]:KEYS:(timestamp name id)' + '(--last)--first=[consider first N archives after other filters were applied]:N: _borg_archive -n "${line[1]%%\:\:*}"' + '(--first)--last=[consider last N archives after other filters were applied]:N: _borg_archive -n "${line[1]%%\:\:*}"' + ) +} + +(( $+functions[__borg_setup_common_dry_run_stats_options] )) || +__borg_setup_common_dry_run_stats_options() { + typeset -ga common_dry_run_stats_options=( + '(-n --dry-run -s --stats)'{-n,--dry-run}'[do not change anything]' + '(-n --dry-run -s --stats)'{-s,--stats}'[print statistics at end]' + # NOTE: actual messages for subcommands differ in details + ) +} + +(( $+functions[__borg_setup_common_create_options] )) || +__borg_setup_common_create_options() { + local -a common_dry_run_stats_options common_exclude_options + __borg_setup_common_dry_run_stats_options + __borg_setup_common_exclude_options + typeset -ga common_create_options=( + $common_dry_run_stats_options + '--list[output verbose list of 
items (files, dirs, ...)]' + '--filter=[only display items with the given status characters]: :_borg_statuschars' + $common_exclude_options + '--exclude-caches[exclude directories that contain a CACHEDIR.TAG file]' + '*--exclude-if-present=[exclude directories that are tagged by containing a filesystem object with the given NAME]:NAME:_files' + '--keep-'{exclude-tags,tag-files}'[if tag objects are specified with --exclude-if-present, don'\''t omit the tag objects themselves]' + '--comment=[add a comment text to the archive]:COMMENT:_borg_placeholders' + '--timestamp=[manually specify the archive creation date/time]:TIMESTAMP:_borg_timestamp' + '(-c --checkpoint-interval)'{-c,--checkpoint-interval}'=[write checkpoint every SECONDS seconds (default: 1800)]: : _borg_guard_unsigned_number "SECONDS"' + '--chunker-params=[specify the chunker parameters]: :_borg_chunker_params' + '(-C --compression)'{-C,--compression}'=[select compression algorithm]: :_borg_compression' + ) +} + +(( $+functions[__borg_setup_common_init_options] )) || +__borg_setup_common_init_options() { + local -a common_options + __borg_setup_common_options + typeset -ga common_init_options=( + $common_options + '--append-only[only allow appending to repository segment files]' + '--storage-quota=[override storage quota of the repository]: :_borg_quota_suffixes' + ) +} + +(( $+functions[_borgfs] )) || +_borgfs() { + _borg-mount '(- :)'{-V,--version}'[show version number and exit]' +} + +(( $+functions[_borg_parameters] )) || +_borg_parameters() { + local name=$1 + shift + local -i ret=1 + local -a expl + + case $name in + (REPO) + local BORG_REPO + unset BORG_REPO + _borg_repository && ret=0 + ;; + ((|NEW_)PASSPHRASE) + _message -e 'passphrase' && ret=0 + ;; + (DISPLAY_PASSPHRASE) + _message -e 'answer to the "display the passphrase for verification" question' && ret=0 + ;; + (HOST_ID) + _message -e 'unique ID' && ret=0 + ;; + (FILES_CACHE_SUFFIX) + _message -e 'suffix' && ret=0 + ;; + (FILES_CACHE_TTL) 
+ _borg_guard_unsigned_number 'time to live (default: 20)' && ret=0 + ;; + (MOUNT_DATA_CACHE_ENTRIES) + _borg_guard_unsigned_number 'number of cached data chunks' && ret=0 + ;; + (PASSCOMMAND|RSH|REMOTE_PATH) + _cmdstring && ret=0 + ;; + (HOSTNAME_IS_UNIQUE|SHOW_SYSINFO|(UNKNOWN_UNENCRYPTED|RELOCATED)_REPO_ACCESS_IS_OK) + _description values expl 'value' + compadd "$expl[@]" yes no && ret=0 + ;; + ((CHECK|DELETE)_I_KNOW_WHAT_I_AM_DOING) + _description values expl 'value' + compadd "$expl[@]" YES NO && ret=0 + ;; + (WORKAROUNDS) + _wanted workarounds expl 'workaround' _sequence -n 1 compadd - basesyncfile && ret=0 + ;; + (KEYS_DIR) + _directories && ret=0 + ;; + (*) + _default && ret=0 + ;; + esac + + return ret +} + +(( $+functions[_borg_repository] )) || +_borg_repository() { + local -a alts opts qopts + zparseopts -E -a opts S: + qopts=( ${(q)opts} ) + [[ -n $BORG_REPO ]] && alts+=( "default-repository: : __borg_default_repository $qopts" ) + alts+=( "cached-repositories:cached repositories:_borg_cached_repositories $qopts" ) + alts+=( 'directories: :_directories -r ":/ \t\n\-"' ) + alts+=( 'remote-repositories: : _borg_remote_repositories' ) + _alternative $alts +} + +(( $+functions[__borg_default_repository] )) || +__borg_default_repository() { + local -a opts suf + zparseopts -E -a opts S: + (( $opts[(I)-S] )) && suf=( -S '' ) + local -a default_repository=( "\:\::$BORG_REPO" ) + _describe -t default-repository 'default repository' default_repository "$suf[@]" +} + +(( $+functions[_borg_cached_repositories] )) || +_borg_cached_repositories() { + local -a cached_repos + local sed_script='/^previous_location = / { + s/// + # no port was given + /ssh:\/\/[^/:]+:[0-9]+/! { + # lstrip the `ssh://` prefix and add a colon before the first slash + s!ssh://([^:/]+)/(.*)!\1:/\2! 
+ } + p + }' + local cachedir=${BORG_CACHE_DIR:-${XDG_CACHE_HOME:-${BORG_BASE_DIR:-$HOME}/.cache}/borg} + cached_repos=( ${(f)"$(_call_program -p cached-repositories sed -n -E ${(q)sed_script} \ + "${(q)cachedir}/*/config(#qN.om)" 2>/dev/null)"} ) + + if [[ $compstate[quote] != (\'|\") ]]; then + # hide ~BORG_REPO and other scalars + local BORG_REPO + unset BORG_REPO sed_script cachedir + cached_repos=( "${(@D)cached_repos}" ) + fi + + compadd -Q "$@" -r ': \t\n\-' -a cached_repos +} + +(( $+functions[_borg_remote_repositories] )) || +_borg_remote_repositories() { + local -a match mbegin mend expl alts + if compset -P '(#b)ssh://[^/]##@[^/]##:([0-9]##)/'; then + _remote_files -/ -- ssh -p $match[1] + return + fi + local -i have_scheme=0 + compset -P 'ssh://' && have_scheme=1 + if compset -P '*:'; then + (( have_scheme )) && alts+=( 'ports: : _borg_guard_unsigned_number "port"' ) + alts+=( 'remote-files:remote file: _remote_files -/ -- ssh' ) + _alternative $alts + elif compset -P 1 '(#b)(*)@'; then + local user=$match[1] + _wanted -C user-at hosts expl "host for $user" \ + _combination -s '[:@]' accounts users-hosts users="$user" hosts -S ':' - + elif compset -S '@*'; then + _wanted users expl "user" \ + _combination -s '[:@]' accounts users-hosts users -q - + else + alts=( + 'users:user:_users -S "@"' + 'hosts:host:_hosts -S ":"' + ) + (( ! have_scheme )) && alts+=( 'prefixes:ssh:compadd -S "" ssh://' ) + _alternative $alts + fi +} + +# _borg_repository_or_archive [-a] [-p] +# +# -a archive is mandatory. The suffix `::` will be added to the repository if possible. +# -p complete placeholders +(( $+functions[_borg_repository_or_archive] )) || +_borg_repository_or_archive() { + local -A opts + zparseopts -A opts -D -E a p + + if compset -P 1 '*::'; then + local qrepo=$IPREFIX[1,-3] + local -i def_repo=0 + [[ -z $qrepo && -n $BORG_REPO ]] && qrepo=${(q)BORG_REPO} && def_repo=1 + if [[ -n $qrepo ]]; then + + if (( ! 
def_repo )); then + case $compstate[quote] in + (\') qrepo=${(qq)qrepo} ;; + (\") qrepo=${(qqq)${(e)qrepo}} ;; + # NOTE: currently `(e)` don't have any effect, but maybe one day zsh will stop to change the quoting method + # of double quoted parameters + esac + fi + + if (( $+opts[-p] )); then + _borg_placeholder_or_archive $qrepo + else + _borg_archive $qrepo + fi + else + _message "not a borg repository: ${(Q)qrepo}" + return 1 + fi + else + local -a suf + if ! compset -S '::*'; then + if (( $+opts[-a] )) || zstyle -T ":completion:${curcontext}:repositories" repository-suffix; then + suf=( -S '::' ) + fi + local oqrepo="$PREFIX$SUFFIX" + local qrepo=$oqrepo + [[ $compstate[quote] != (\'|\") ]] && qrepo=${(Q)qrepo} + if __borg_is_borg_repo $qrepo; then + qrepo=${oqrepo%%/} + [[ -z $SUFFIX ]] && PREFIX=${PREFIX%%/} || SUFFIX=${SUFFIX%%/} + compadd -S '::' -r ':/ \t\n\-' -Q -- $qrepo + return + fi + fi + _borg_repository "$suf[@]" + fi +} + +# _borg_archive [-F] [-n] [qrepo] +# +# -F don't apply archive filter options on the command line +# -n reverse order, disable matchers and don't do menu completion/selection +(( $+functions[_borg_archive] )) || +_borg_archive() { + local -A opts + zparseopts -A opts -D -E F n + + local qrepo=$1 + + if [[ -z $qrepo ]]; then + if [[ -n $BORG_REPO ]]; then + qrepo=${(q)BORG_REPO} + else + _message 'no repository specified' + return 1 + fi + fi + + local -i ret=1 + _tags archives + while _tags; do + + if _requested archives; then + + local -a expl disp archive_filters + local -i reversed_order=1 + + if (( ! 
$+opts[-F] )); then + local -a archive_filter_options=( -P --prefix -a --glob-archives --first --last --sort-by ) tmp + local k + for k in $archive_filter_options; do + if [[ -n $opt_args[$k] ]]; then + IFS=: read -A tmp <<<$opt_args[$k] + archive_filters+=( $k=${^tmp:#} ) + fi + done + fi + + if (( $+opts[-n] )); then + __borg_skip_pattern_matching || return 1 + + disp+=( -U ) + + compstate[insert]='' + compstate[list]='list force' + + reversed_order=0 + fi + + local -a asort + zstyle -a ":completion:${curcontext}:archives" archive-sort asort + if (( $asort[(I)inverse] )); then + (( reversed_order = ! reversed_order )) + fi + local -a sort_by=( --sort-by=${(M)^asort:#(timestamp|name|id)} ) + # NOTE: in case of option repetition, the later one takes precedence + + if (( ! $+__borg_archives_need_update )); then + comppostfuncs+=( __borg_unset_archives_need_update ) + typeset -gHi __borg_archives_need_update=1 + if (( ! $#archive_filters && ! $+opts[-n] )); then + local erepo + [[ -n $1 ]] && __borg_expand_path ${(Q)qrepo} erepo + local -a newest_file=( $erepo/(hints|index|integrity).<1->(#qN.om[1]) ) + if [[ -n $newest_file ]]; then + if zmodload -F zsh/stat b:zstat 2>/dev/null; then + local -a stats + zstat -A stats +mtime $newest_file + local -i mtime=$stats[1] + if [[ $__borg_prev_repo == $erepo + && __borg_prev_mtime -ge mtime + && $__borg_prev_order == $reversed_order + && $__borg_prev_sort_by == $sort_by ]] + then + __borg_archives_need_update=0 + else + typeset -gH __borg_prev_repo=$erepo + typeset -gHi __borg_prev_mtime=mtime __borg_prev_order=reversed_order + typeset -gHa __borg_prev_sort_by=( $sort_by ) + fi + fi + fi + else + unset __borg_prev_{repo,mtime,order,sort_by} + comppostfuncs+=( __borg_unset_archives ) + fi + fi + + if zstyle -t ":completion:${curcontext}:archives" verbose; then + if (( __borg_archives_need_update || ! $+__borg_archive_names || ! 
$+__borg_archive_descriptions )); then + __borg_archives_need_update=0 + typeset -gHa __borg_archive_names=() __borg_archive_descriptions=() + local fmt descfmt name desc + zstyle -s ":completion:${curcontext}:archives" archive-description-format descfmt || + descfmt='{archive:<36} {time} [{id}]' + fmt="{barchive}{NUL}$descfmt{NUL}" + _call_program -p archive-descriptions \ + ${(q)__borg_command:-borg} list --format=${(q)fmt} ${(q)sort_by} $archive_filters $qrepo 2>/dev/null | + while IFS= read -r -d $'\0' name && IFS= read -r -d $'\0' descr; do + __borg_archive_names[1,0]=( $name ) + __borg_archive_descriptions[1,0]=( "$descr" ) + done + (( $pipestatus[1] )) && { + _message "couldn't list repository: ${(Q)qrepo}" + unset __borg_prev_{repo,mtime,order,sort_by} + return 1 + } + (( ! reversed_order )) && + __borg_archive_names=( "${(@aO)__borg_archive_names}" ) && + __borg_archive_descriptions=( "${(@aO)__borg_archive_descriptions}" ) + fi + disp+=( -ld __borg_archive_descriptions ) + elif (( __borg_archives_need_update || ! $+__borg_archive_names )); then + __borg_archives_need_update=0 + typeset -gHa __borg_archive_names=() + local fmt='{barchive}{NUL}' + __borg_archive_names=( ${(@0aO)"$(_call_program -p archives \ + ${(q)__borg_command:-borg} list --format=${(q)fmt} ${(q)sort_by} $archive_filters $qrepo 2>/dev/null)"} ) + (( $pipestatus[1] )) && { + _message "couldn't list repository: ${(Q)qrepo}" + unset __borg_prev_{repo,mtime,order,sort_by} + return 1 + } + (( ! 
reversed_order )) && + __borg_archive_names=( "${(@aO)__borg_archive_names}" ) + fi + + _all_labels archives expl 'ARCHIVE' compadd "$disp[@]" -a __borg_archive_names && ret=0 + + fi + + (( ret )) || return 0 + + done + + return 1 +} + +(( $+functions[__borg_unset_archives] )) || +__borg_unset_archives() { + unset __borg_archive_names __borg_archive_descriptions +} + +(( $+functions[__borg_unset_archives_need_update] )) || +__borg_unset_archives_need_update() { + unset __borg_archives_need_update +} + +(( $+functions[__borg_is_borg_repo] )) || +__borg_is_borg_repo() { + local repo=$1 + __borg_expand_path $repo repo + if [[ -d $repo && -d $repo/data ]]; then + local -a files=( $repo/(hints|index|integrity).<1->(#qN.) ) + (( $#files >= 3 )) && return 0 + fi + return 1 +} + +(( $+functions[__borg_expand_path] )) || +__borg_expand_path() { + local _path=$1 + local -a match mbegin mend + if [[ $_path == (#b)(\~[^/]#)(|/*) ]]; then + local etilde + etilde=$~match[1] 2>/dev/null + _path="$etilde$match[2]" + fi + _path=${(e)_path//\\\\/\\\\\\\\} + eval typeset -g ${2:-REPLY}=\$_path +} + +(( $+functions[_borg_placeholder_or_archive] )) || +_borg_placeholder_or_archive() { + local qrepo=$1 + shift + _alternative \ + 'placeholders: :_borg_placeholders' \ + "archives: : _borg_archive ${(q)qrepo}" +} + +(( $+functions[_borg_placeholders] )) || +_borg_placeholders() { + local -a placeholders=( + 'hostname:The (short) hostname of the machine.' + 'fqdn:The full name of the machine.' + 'reverse-fqdn:The full name of the machine in reverse domain name notation.' + 'now:The current local date and time, by default in ISO-8601 format. You can also supply your own format string, e.g. {now:%Y-%m-%d_%H:%M:%S}' + 'utcnow:The current UTC date and time, by default in ISO-8601 format. You can also supply your own format string, e.g. {utcnow:%Y-%m-%d_%H:%M:%S}' + 'user:The user name (or UID, if no name is available) of the user running borg.' + 'pid:The current process ID.' 
+ 'borgversion:The version of borg, e.g.: 1.0.8rc1' + 'borgmajor:The version of borg, only the major version, e.g.: 1' + 'borgminor:The version of borg, only major and minor version, e.g.: 1.0' + 'borgpatch:The version of borg, only major, minor and patch version, e.g.: 1.0.8' + ) + __borg_complete_keys _describe -t placeholders 'placeholder' placeholders '"$copts[@]"' +} + +(( $+functions[_borg_format_keys] )) || +_borg_format_keys() { + local repo_or_arch=${(Q)1} + + local -a keys=( NEWLINE NL NUL SPACE TAB CR LF ) + local -a repository_keys=( archive name barchive comment bcomment id start time end hostname username ) + local -a archive_keys=( type mode uid gid user group path bpath source linktarget flags size csize dsize dcsize + num_chunks unique_chunks mtime ctime atime isomtime isoctime isoatime blake2b blake2s md5 sha1 sha224 sha256 sha384 + sha3_224 sha3_256 sha3_384 sha3_512 sha512 shake_128 shake_256 archiveid archivename extra health ) + + local akeys rkeys + akeys='archive-keys:archive keys:compadd -a archive_keys' + rkeys='repository-keys:repository keys:compadd -a repository_keys' + local -a alts=( 'keys:keys:compadd -a keys' ) + if [[ $repo_or_arch == *::?* ]]; then + alts+=( $akeys ) + elif [[ -n $repo_or_arch ]]; then + alts+=( $rkeys ) + else + alts+=( $rkeys $akeys ) + fi + + __borg_complete_keys _alternative -O copts ${(q)alts} +} + +(( $+functions[__borg_complete_keys] )) || +__borg_complete_keys() { + compset -P '*[^A-Za-z]##' + compset -S '[^A-Za-z]##*' + + [[ -n $ISUFFIX ]] && compstate[to_end]='' + # NOTE: `[[ -n $ISUFFIX ]]` is a workaround for a bug that causes cursor movement to the right further than it should + # NOTE: the _oldlist completer doesn't respect compstate[to_end]='' + + local ipref suf + if [[ $IPREFIX[-1] != '{' ]]; then + ipref='{' + [[ $compstate[quote] != (\'|\") ]] && ipref='\{' + fi + if [[ $ISUFFIX[1] != (|\\)\} ]]; then + suf='}' + [[ $compstate[quote] != (\'|\") ]] && suf='\}' + fi + + local -a copts=( -i 
"$ipref" -S "$suf" ) + eval "$@" +} + +# _borg_style_selector_or_archive_files [-e] [-p] archive default_style_selector +# +# -e apply exclusion options on the command line +# -p complete `--pattern` +# -f complete files rather than borg paths +(( $+functions[_borg_style_selector_or_archive_files] )) || +_borg_style_selector_or_archive_files() { + local -A opts + zparseopts -A opts -D -E e p f + + local arch=$1 default_style_selector=$2 + shift 2 + + local -a match mbegin mend expl tags=( style-selectors archive-files ) ss_suf=( -S ':' -r ':' ) + (( $+opts[-f] )) && tags=( style-selectors files ) + local -i ret=1 + + if (( $+opts[-p] )); then + if ! compset -P '(#b)([RP\+\-\!])'; then + local -a pattern_rules=( + 'P:pattern style' + 'R:root path' + '+:include' + '-:exclude' + '!:exclude non-recurse' + ) + _describe -t pattern-rules 'pattern rule' pattern_rules -S '' + return + else + if [[ $compstate[quote] == (\'|\") ]]; then + compset -P ' #' + else + compset -P '(\\ )#' + fi + if [[ $match[1] == 'R' ]]; then + default_style_selector='pp' + elif [[ $match[1] == 'P' ]]; then + tags=( style-selectors ) + ss_suf=() + fi + fi + fi + + _tags $tags + while _tags; do + if _requested style-selectors; then + _all_labels style-selectors expl 'style selector' \ + __borg_style_selectors $default_style_selector "$ss_suf[@]" - && ret=0 + fi + if _requested archive-files; then + _all_labels archive-files expl 'PATTERN' \ + __borg_archive_files ${(k)opts} "$arch" $default_style_selector - && ret=0 + fi + if _requested files; then + local -a borg_paths=( ${(Q)${(e)${~@}}} ) + _all_labels files expl 'PATH' \ + __borg_pattern_files ${(k)opts} borg_paths - && ret=0 + fi + (( ret )) || return 0 + done + + return 1 +} + +(( $+functions[__borg_style_selectors] )) || +__borg_style_selectors() { + local default_style_selector=$1 path_style_selector + shift + zstyle -s ":completion:${curcontext}:archive-files" path-style-selector path_style_selector || + path_style_selector='fm' + local 
-a disp + local -A style_selectors + __borg_setup_style_selectors + if zstyle -T ":completion:${curcontext}:style-selectors" verbose; then + local -a style_selector_descriptions extra + local k v sep + for k v in ${(kv)style_selectors}; do + extra=() + [[ $k == $default_style_selector ]] && extra+=( 'default' ) + [[ $k == $path_style_selector ]] && __borg_choose_path_or_pattern "" "$default_style_selector" && + extra+=( 'path' ) + (( $#extra )) && v+=" (${(j:, :)extra})" + style_selector_descriptions+=( "${${k//\\/\\\\}//:/\\:}:$v" ) + done + zstyle -s ":completion:${curcontext}:style-selectors" list-separator sep || sep=-- + zformat -a style_selector_descriptions " $sep " $style_selector_descriptions + disp=( -ld style_selector_descriptions ) + fi + compadd "$disp[@]" "$@" -k style_selectors +} + +(( $+functions[__borg_archive_files] )) || +__borg_archive_files() { + local -A opts + zparseopts -A opts -D e p + + local arch=$1 default_style_selector=$2 + shift 2 + + if [[ -z $arch || $arch != *::?* ]]; then + _message 'no archive specified' + return 1 + fi + + local -a qargs tmp disp pref match mbegin mend archive_files descs + local -A style_selectors + local k cword fmt descfmt style_selector path_style_selector name descr + + # take into account exclude options on the command line + if (( $+opts[-e] )); then + local -a exclude_options=( -e --exclude --exclude-from --pattern --pattern-from ) + local -a excludes + for k in $exclude_options; do + if [[ -n $opt_args[$k] ]]; then + IFS=: read -A tmp <<<$opt_args[$k] + excludes+=( $k="${^tmp[@]}" ) + fi + done + [[ -n $excludes ]] && qargs+=( "$excludes[@]" ) + fi + + (( $_matcher_num > 1 )) && return 1 + __borg_skip_pattern_matching || return 1 + + cword="$PREFIX$SUFFIX" + [[ $compstate[quote] != (\'|\") ]] && cword=${(Q)cword} + + [[ -z $cword ]] && return 1 + + if zstyle -t ":completion:${curcontext}:archive-files" verbose; then + zstyle -s ":completion:${curcontext}:archive-files" file-description-format descfmt 
|| + descfmt='{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}' + fmt="{bpath}{NUL}$descfmt{NUL}" + else + fmt='{bpath}{NUL}' + fi + qargs+=( --format=${(q)fmt} ) + + qargs+=( $arch ) + + __borg_setup_style_selectors + [[ $cword == (#b)(${~${(j:|:)${(kb)style_selectors}}}):* ]] && style_selector=$match[1] + + local -i path_expected=0 + __borg_choose_path_or_pattern "$style_selector" $default_style_selector $cword && path_expected=1 + + if [[ -n $cword ]]; then + if (( path_expected )); then + [[ -n $style_selector ]] && compset -P "$style_selector:" && pref=( -P "$style_selector:" ) + cword="$PREFIX$SUFFIX" + [[ $compstate[quote] != (\'|\") ]] && cword=${(Q)cword} + zstyle -s ":completion:${curcontext}:archive-files" path-style-selector path_style_selector || + path_style_selector='fm' + cword="$path_style_selector:$cword" + else + [[ -z $style_selector ]] && cword="$default_style_selector:$cword" + fi + qargs+=( ${(q)cword} ) + fi + + if zstyle -t ":completion:${curcontext}:archive-files" verbose; then + _call_program -p archive-file-descriptions ${(q)__borg_command:-borg} list $qargs 2>/dev/null | + while IFS= read -r -d $'\0' name && IFS= read -r -d $'\0' descr; do + archive_files+=( $name ) + descs+=( $descr ) + done + (( $pipestatus[1] )) && { _message "couldn't list archive: ${(Q)arch}"; return 1 } + disp=( -ld descs ) + else + archive_files=( ${(0)"$(_call_program -p archive-files ${(q)__borg_command:-borg} list $qargs 2>/dev/null)"} ) + (( $pipestatus[1] )) && { _message "couldn't list archive: ${(Q)arch}"; return 1 } + fi + + if (( $#archive_files )); then + if (( path_expected )); then + compstate[insert]='automenu' + else + compstate[insert]='' + compstate[list]='list force' + fi + fi + + compadd "$pref[@]" -U "$disp[@]" "$@" -a archive_files +} + +(( $+functions[__borg_choose_path_or_pattern] )) || +__borg_choose_path_or_pattern() { + local ss=$1 defss=$2 cword=$3 + shift 2 + [[ $ss == (pp|pf) || ( -z $ss && $defss == (pp|pf) ) ]] +} + +# 
transform borg exclude patterns into zsh ignore patterns and then complete files +(( $+functions[__borg_pattern_files] )) || +__borg_pattern_files() { + local -A opts + zparseopts -A opts -D -E e p f + + local paths_varname=$1 + shift + + local -a args + local -A style_selectors + __borg_setup_style_selectors + local pr_pat='[RP\+\-\!]' ss_pat="(${(j:|:)${(@kb)style_selectors}}):" + local prs_pat="$pr_pat #" + + if (( $+opts[-e] )); then + local -a borg_excludes exclude_options=( -e --exclude --pattern ) tmp + local k cword + local -i i + for k in $exclude_options; do + if [[ -n $opt_args[$k] ]]; then + IFS=: read -A tmp <<<$opt_args[$k] + tmp=( ${(Q)tmp} ) + # lstrip style selectors and pattern rules + [[ $+opts[-p] -gt 0 || $k == --pattern ]] && tmp=( ${tmp#$~prs_pat} ) + tmp=( ${tmp#$~ss_pat} ) + + # don't take into account the word under the cursor + cword="$PREFIX$SUFFIX" + [[ $compstate[quote] != (\'|\") ]] && cword=${(Q)cword} + [[ $+opts[-p] -gt 0 || $k == --pattern ]] && cword=${cword#$~prs_pat} + cword=${cword#$~ss_pat} + i=$tmp[(I)$cword] + (( i )) && tmp=( "${(@)tmp[1,i-1]}" "${(@)tmp[i+1,-1]}" ) + + borg_excludes+=( "$tmp[@]" ) + fi + done + [[ -n $borg_excludes ]] && args+=( -F borg_excludes ) + fi + + [[ -n ${(P)paths_varname} ]] && args+=( -W $paths_varname ) + + args+=( "$@" ) + + # lstrip style selectors and pattern rules + if (( $+opts[-p] )); then + if [[ $compstate[quote] != (\'|\") ]]; then + compset -P $pr_pat + compset -P '(\\ )#' + else + compset -P $prs_pat + fi + fi + compset -P $ss_pat + + compstate[insert]='' + compstate[list]='list force' + + _path_files "$args[@]" +} + +(( $+functions[__borg_setup_style_selectors] )) || +__borg_setup_style_selectors() { + typeset -gA style_selectors=( + fm 'Fnmatch' + sh 'Shell-style patterns' + re 'Regular expressions' + pp 'Path prefix' + pf 'Path full-match' + ) +} + +(( $+functions[__borg_skip_pattern_matching] )) || +__borg_skip_pattern_matching() { + # unset glob_complete + [[ 
$compstate[pattern_match] == '*' ]] && compstate[pattern_match]='' + # skip the _match completer + [[ -n $compstate[pattern_match] ]] && return 1 + return 0 +} + +(( $+functions[_borg_config] )) || +_borg_config() { + local qrepo=$1 + shift + + if (( ! $+__borg_config_sect )); then + comppostfuncs+=( __borg_unset_config ) + typeset -gH __borg_config_sect= + typeset -gHa __borg_config_keys=() + local sect line + local -a match mbegin mend + _call_program -p keys ${(q)__borg_command:-borg} config --list $qrepo 2>/dev/null | { + IFS= read -r sect + sect=${${sect#\[}%\]} + __borg_config_sect=$sect + while IFS= read -r line && [[ $line == (#b)(*)\ =\ (*) ]]; do + __borg_config_keys+=( "${${match[1]//\\/\\\\}//:/\\:}:(current: $match[2])" ) + done + } + fi + + local -a alts=( 'keys:key: _describe -t keys "key" __borg_config_keys' ) + compset -P "${__borg_config_sect}." || alts+=( 'sections:section:compadd -S "." $__borg_config_sect' ) + _alternative $alts +} + +(( $+functions[__borg_unset_config] )) || +__borg_unset_config() { + unset __borg_config_sect __borg_config_keys +} + +# A simple prefix-oriented completion function for compressors. Can be improved by supporting the suffix. +(( $+functions[_borg_compression] )) || +_borg_compression() { + local -a nolvl=( + 'none:do not compress' + 'lz4:very high speed, very low compression' + ) + local -a havelvl=( + 'zstd:("zstandart")' + 'zlib:("gz") medium speed, medium compression' + 'lzma:("xz") low speed, high compression' + ) + local -a auto=( + 'auto:compress compressible, otherwise "none"' + ) + local -a match mbegin mend + # NOTE: Zsh's `-prefix` condition is confused by the leading parenthesis in the pattern. + # Fortunately, we simply need to show a message. 
+ if compset -P '(#b)(|auto,)(zstd|zlib|lzma),'; then + local -i from to def + case $match[2] in + (zstd) from=1 to=22 def=3 ;; + (zlib|lzma) from=0 to=9 def=6 ;; + esac + _message -e "compression level (from $from to $to, default: $def)" + elif compset -P 'auto,'; then + _describe -t compression 'compression' nolvl -- havelvl -qS, + else + _describe -t compression 'compression' nolvl -- havelvl -qS, -- auto -S, + fi +} + +(( $+functions[_borg_chunker_params] )) || +_borg_chunker_params() { + if compset -P '*,*,*,'; then + _message -e 'HASH_WINDOW_SIZE' + elif compset -P '*,*,'; then + _message -e 'HASH_MASK_BITS (target chunk size ~= 2^HASH_MASK_BITS B)' + elif compset -P '*,'; then + _message -e 'CHUNK_MAX_EXP (maximum chunk size = 2^CHUNK_MAX_EXP B)' + else + _message -e 'CHUNK_MIN_EXP (minimum chunk size = 2^CHUNK_MIN_EXP B)' + local -a params=( + 'default:19,23,21,4095' + '19,23,21,4095:small amount of chunks (default)' + '10,23,16,4095:big amount of chunks' + ) + _describe -t chunker-params 'typical chunker params' params + fi +} + +(( $+functions[_borg_statuschars] )) || +_borg_statuschars() { + _values -s '' 'STATUSCHARS' \ + 'A[regular file, added]' \ + 'M[regular file, modified]' \ + 'U[regular file, unchanged]' \ + 'E[regular file, an error happened while accessing/reading this file]' \ + 'd[directory]' \ + 'b[block device]' \ + 'c[char device]' \ + 'h[regular file, hardlink (to already seen inodes)]' \ + 's[symlink]' \ + 'f[fifo]' \ + 'i[backup data was read from standard input (stdin)]' \ + '-[dry run, item was not backed up]' \ + 'x[excluded, item was not backed up]' \ + '?[missing status code]' +} + +(( $+functions[_borg_quota_suffixes] )) || +_borg_quota_suffixes() { + if compset -P '[0-9]##'; then + local -a suffixes=( + 'K:10 ** 3 bytes' + 'M:10 ** 6 bytes' + 'G:10 ** 9 bytes' + 'T:10 ** 12 bytes' + 'P:10 ** 15 bytes' + ) + # NOTE: tag `suffixes` is already in use (file extensions) + _describe -t multiplier 'suffix' suffixes + else + _message -e 
'QUOTA' + fi +} + +(( $+functions[_borg_timestamp] )) || +_borg_timestamp() { + _alternative \ + "dates:TIMESTAMP: _dates -f '%FT%T'" \ + 'files:reference:_files' +} + +(( $+functions[_borg_guard_unsigned_number] )) || +_borg_guard_unsigned_number() { + local -A opts + zparseopts -K -D -A opts M+: J+: V+: 1 2 o+: n F: x+: X+: + _guard '[0-9]#' ${1:-number} +} + +(( $+functions[_borg_guard_numeric_mode] )) || +_borg_guard_numeric_mode() { + local -A opts + zparseopts -K -D -A opts M+: J+: V+: 1 2 o+: n F: x+: X+: + _guard '[0-7](#c0,4)' ${1:-mode} +} + +_borg() { + local -a match mbegin mend line state + local curcontext="$curcontext" state_descr + typeset -A opt_args + local -i ret=1 + + if [[ $service == 'borg' ]]; then + local __borg_command=$words[1] + + local -a common_options + __borg_setup_common_options + + _arguments -s -w -C : \ + '(- :)'{-V,--version}'[show version number and exit]' \ + $common_options \ + '(-): :->command' \ + '(-)*:: :->option-or-argument' && return + + case $state in + (command) + _borg_commands && ret=0 + ;; + (option-or-argument) + curcontext="${curcontext%:*:*}:borg-$words[1]:" + + if ! _call_function ret _borg-$words[1]; then + _default && ret=0 + fi + ;; + esac + elif [[ $service == (#b)-value-,BORG_(*),-default- ]]; then + _borg_parameters $match[1] && ret=0 + elif ! 
_call_function ret _$service; then + _default && ret=0 + fi + + return ret +} + +_borg "$@" diff --git a/scripts/sign-binaries b/scripts/sign-binaries index e7ae657e76..4f145be751 100755 --- a/scripts/sign-binaries +++ b/scripts/sign-binaries @@ -1,9 +1,20 @@ #!/bin/bash -# usage: sign-binaries 201512312359 + +D=$1 + +if [ "$D" = "" ]; then + echo "Usage: sign-binaries 201912312359" + exit +fi + +if [ "$QUBES_GPG_DOMAIN" = "" ]; then + GPG=gpg +else + GPG=qubes-gpg-client-wrapper +fi for file in dist/borg-*; do - gpg --armor --detach-sign $file + $GPG --local-user "Thomas Waldmann" --armor --detach-sign --output $file.asc $file done -touch -t $1 dist/* - +touch -t $D dist/* diff --git a/scripts/upload-pypi b/scripts/upload-pypi new file mode 100755 index 0000000000..db0ef45f37 --- /dev/null +++ b/scripts/upload-pypi @@ -0,0 +1,18 @@ +#!/bin/bash + +R=$1 + +if [ "$R" = "" ]; then + echo "Usage: upload-pypi 1.2.3 [test]" + exit +fi + +if [ "$2" = "test" ]; then + export TWINE_REPOSITORY_URL=https://test.pypi.org/legacy/ +else + export TWINE_REPOSITORY_URL= +fi + +D=dist/borgbackup-$R.tar.gz + +twine upload $D.asc $D diff --git a/setup.cfg b/setup.cfg index 93f009af50..eb08932b42 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,12 +1,81 @@ [tool:pytest] python_files = testsuite/*.py +markers = + allow_cache_wipe [flake8] +# for reference ... 
+# E121 continuation line under-indented for hanging indent +# E122 continuation line missing indentation or outdented +# E123 closing bracket does not match indentation of opening bracket's line +# E125 continuation line with same indent as next logical line +# E126 continuation line over-indented for hanging indent +# E127 continuation line over-indented for visual indent +# E128 continuation line under-indented for visual indent +# E221 multiple spaces before operator +# E226 missing whitespace around arithmetic operator +# E261 at least two spaces before inline comment +# E265 block comment should start with '# ' +# E301 expected 1 blank line +# E305 expected 2 blank lines after class or function definition +# E401 multiple imports on one line +# E402 module level import not at top +# E501 line too long +# E722 do not use bare except +# E731 do not assign a lambda expression, use def +# E741 ambiguous variable name +# F401 import unused +# F403 from ... import * used, unable to detect undefined names +# F405 undefined or defined from star imports +# F811 redef of unused var +# F821 undefined name +# W391 blank line at end of file +# #### Pick either W503, or W504 - latest recommendation from pep8 is to ignore W503 +# W503 line break before binary operator +# W504 line break after binary operator + +# borg code style guidelines: +ignore = E226, W503 +
+# Code style violation exceptions: # please note that the values are adjusted so that they do not cause failures # with existing code. if you want to change them, you should first fix all # flake8 failures that appear with your change. 
-ignore = E122,E123,E125,E126,E127,E128,E226,E402,F401,F405,F811 -# line length long term target: 120 -max-line-length = 255 -exclude = build,dist,.git,.idea,.cache,.tox,docs/conf.py +per_file_ignores = + docs/conf.py:E121,E126,E265,E305,E401,E402, + src/borg/archive.py:E122,E125,E127,E402,E501,F401,F405,W504 + src/borg/archiver.py:E126,E127,E128,E501,E722,E731,E741,F401,F405,W504 + src/borg/cache.py:E127,E128,E402,E501,E722,W504 + src/borg/fuse.py:E402,E501,E722,W504 + src/borg/helpers.py:E127,E402,E501,E722,E741,F401,F405 + src/borg/locking.py:E501,E722 + src/borg/remote.py:E128,E501,F405 + src/borg/repository.py:E126,E501,F401,F405,W504 + src/borg/shellpattern.py:E501 + src/borg/upgrader.py:E501 + src/borg/algorithms/msgpack/*.py:E127,E221,E261,E301,E402,E722,E731,F401,F403,F821,W391 + src/borg/crypto/key.py:E125,E128,E402,E501,F405,W504 + src/borg/crypto/keymanager.py:E126,E128,E501,F401 + src/borg/crypto/nonces.py:E501 + src/borg/platform/__init__.py:F401,F811 + src/borg/testsuite/__init__.py:E501,F401 + src/borg/testsuite/archive.py:E128,W504 + src/borg/testsuite/archiver.py:E128,E501,E722,F401,F405,F811 + src/borg/testsuite/benchmark.py:F401,F811 + src/borg/testsuite/chunker.py:E501,F405 + src/borg/testsuite/compress.py:F401 + src/borg/testsuite/crypto.py:E126,E501,E741 + src/borg/testsuite/file_integrity.py:F401 + src/borg/testsuite/hashindex.py:F401 + src/borg/testsuite/helpers.py:E126,E127,E501,F401 + src/borg/testsuite/key.py:E501 + src/borg/testsuite/locking.py:E126,E722,F401 + src/borg/testsuite/patterns.py:E123 + src/borg/testsuite/platform.py:E501,F401,F811 + src/borg/testsuite/repository.py:E501,F401 + src/borg/testsuite/shellpattern.py:E123 + src/borg/testsuite/upgrader.py:F405 + +max_line_length = 120 +exclude = build,dist,.git,.idea,.cache,.tox diff --git a/setup.py b/setup.py index dd85db6a1e..e20ac1afed 100644 --- a/setup.py +++ b/setup.py @@ -3,16 +3,31 @@ import io import re import sys +import textwrap from collections import OrderedDict from 
datetime import datetime from glob import glob -from distutils.command.build import build -from distutils.core import Command +import setup_lz4 +import setup_zstd +import setup_b2 +import setup_xxhash -import textwrap +# True: use the shared liblz4 (>= 1.7.0 / r129) from the system, False: use the bundled lz4 code +prefer_system_liblz4 = True + +# True: use the shared libzstd (>= 1.3.0) from the system, False: use the bundled zstd code +prefer_system_libzstd = True + +# True: use the shared libb2 from the system, False: use the bundled blake2 code +prefer_system_libb2 = True + +# True: use the shared libxxhash (>= 0.6.5 [>= 0.7.2 on ARM]) from the system, False: use the bundled xxhash code +prefer_system_libxxhash = True + +# prefer_system_msgpack is another option, but you need to set it in src/borg/helpers.py. -min_python = (3, 4) +min_python = (3, 5) my_python = sys.version_info if my_python < min_python: @@ -22,33 +37,25 @@ # Are we building on ReadTheDocs? on_rtd = os.environ.get('READTHEDOCS') -# msgpack pure python data corruption was fixed in 0.4.6. -# Also, we might use some rather recent API features. -install_requires = ['msgpack-python>=0.4.6', ] +install_requires = [ + 'packaging', +] # note for package maintainers: if you package borgbackup for distribution, # please add llfuse as a *requirement* on all platforms that have a working # llfuse package. "borg mount" needs llfuse to work. # if you do not have llfuse, do not require it, most of borgbackup will work. 
extras_require = { - # llfuse 0.40 (tested, proven, ok), needs FUSE version >= 2.8.0 - # llfuse 0.41 (tested shortly, looks ok), needs FUSE version >= 2.8.0 - # llfuse 0.41.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0 - # llfuse 0.42 (tested shortly, looks ok), needs FUSE version >= 2.8.0 - # llfuse 1.0 (tested shortly, looks ok), needs FUSE version >= 2.8.0 - # llfuse 1.1.1 (tested shortly, looks ok), needs FUSE version >= 2.8.0 - # llfuse 2.0 will break API - 'fuse': ['llfuse<2.0', ], + 'fuse': [ + # 1.3.8 is the fixed version that works on py39 AND freebsd. + # if you cythonize yourself and make sure llfuse works for your + # OS and python version, you can use other versions than 1.3.8, too. + 'llfuse >=1.3.4', # should nowadays pull 1.3.8 or better + ], } -if sys.platform.startswith('freebsd'): - # llfuse was frequently broken / did not build on freebsd - # llfuse 0.41.1, 1.1 are ok - extras_require['fuse'] = ['llfuse <2.0, !=0.42.*, !=0.43, !=1.0', ] - -from setuptools import setup, find_packages, Extension +from setuptools import setup, find_packages, Extension, Command from setuptools.command.sdist import sdist -from distutils.command.clean import clean compress_source = 'src/borg/compress.pyx' crypto_ll_source = 'src/borg/crypto/low_level.pyx' @@ -58,31 +65,43 @@ checksums_source = 'src/borg/algorithms/checksums.pyx' platform_posix_source = 'src/borg/platform/posix.pyx' platform_linux_source = 'src/borg/platform/linux.pyx' +platform_syncfilerange_source = 'src/borg/platform/syncfilerange.pyx' platform_darwin_source = 'src/borg/platform/darwin.pyx' platform_freebsd_source = 'src/borg/platform/freebsd.pyx' +msgpack_packer_source = 'src/borg/algorithms/msgpack/_packer.pyx' +msgpack_unpacker_source = 'src/borg/algorithms/msgpack/_unpacker.pyx' -cython_sources = [ +cython_c_sources = [ + # these .pyx will get compiled to .c compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source, - platform_posix_source, 
platform_linux_source, + platform_syncfilerange_source, platform_freebsd_source, platform_darwin_source, ] +cython_cpp_sources = [ + # these .pyx will get compiled to .cpp + msgpack_packer_source, + msgpack_unpacker_source, +] + try: from Cython.Distutils import build_ext import Cython.Compiler.Main as cython_compiler class Sdist(sdist): def __init__(self, *args, **kwargs): - for src in cython_sources: + for src in cython_c_sources: cython_compiler.compile(src, cython_compiler.default_options) + for src in cython_cpp_sources: + cython_compiler.compile(src, cplus=True) super().__init__(*args, **kwargs) def make_distribution(self): @@ -99,8 +118,11 @@ def make_distribution(self): 'src/borg/algorithms/xxh64/xxhash.h', 'src/borg/algorithms/xxh64/xxhash.c', 'src/borg/platform/posix.c', 'src/borg/platform/linux.c', + 'src/borg/platform/syncfilerange.c', 'src/borg/platform/freebsd.c', 'src/borg/platform/darwin.c', + 'src/borg/algorithms/msgpack/_packer.cpp', + 'src/borg/algorithms/msgpack/_unpacker.cpp', ]) super().make_distribution() @@ -117,12 +139,18 @@ def __init__(self, *args, **kwargs): checksums_source = checksums_source.replace('.pyx', '.c') platform_posix_source = platform_posix_source.replace('.pyx', '.c') platform_linux_source = platform_linux_source.replace('.pyx', '.c') + platform_syncfilerange_source = platform_syncfilerange_source.replace('.pyx', '.c') platform_freebsd_source = platform_freebsd_source.replace('.pyx', '.c') platform_darwin_source = platform_darwin_source.replace('.pyx', '.c') - from distutils.command.build_ext import build_ext + + msgpack_packer_source = msgpack_packer_source.replace('.pyx', '.cpp') + msgpack_unpacker_source = msgpack_unpacker_source.replace('.pyx', '.cpp') + + from setuptools.command.build_ext import build_ext if not on_rtd and not all(os.path.exists(path) for path in [ compress_source, crypto_ll_source, chunker_source, hashindex_source, item_source, checksums_source, - platform_posix_source, platform_linux_source, 
platform_freebsd_source, platform_darwin_source]): + platform_posix_source, platform_linux_source, platform_syncfilerange_source, platform_freebsd_source, platform_darwin_source, + msgpack_packer_source, msgpack_unpacker_source]): raise ImportError('The GIT version of Borg needs Cython. Install Cython or use a released version.') @@ -130,36 +158,17 @@ def detect_openssl(prefixes): for prefix in prefixes: filename = os.path.join(prefix, 'include', 'openssl', 'evp.h') if os.path.exists(filename): - with open(filename, 'r') as fd: - if 'PKCS5_PBKDF2_HMAC(' in fd.read(): - return prefix - - -def detect_lz4(prefixes): - for prefix in prefixes: - filename = os.path.join(prefix, 'include', 'lz4.h') - if os.path.exists(filename): - with open(filename, 'r') as fd: - if 'LZ4_decompress_safe' in fd.read(): - return prefix - - -def detect_libb2(prefixes): - for prefix in prefixes: - filename = os.path.join(prefix, 'include', 'blake2.h') - if os.path.exists(filename): - with open(filename, 'r') as fd: - if 'blake2b_init' in fd.read(): + with open(filename, 'rb') as fd: + if b'PKCS5_PBKDF2_HMAC(' in fd.read(): return prefix include_dirs = [] library_dirs = [] define_macros = [] -crypto_libraries = ['crypto'] possible_openssl_prefixes = ['/usr', '/usr/local', '/usr/local/opt/openssl', '/usr/local/ssl', '/usr/local/openssl', - '/usr/local/borg', '/opt/local', '/opt/pkg', ] + '/usr/local/borg', '/opt/local', '/opt/pkg', '/opt/homebrew/opt/openssl@1.1', ] if os.environ.get('BORG_OPENSSL_PREFIX'): possible_openssl_prefixes.insert(0, os.environ.get('BORG_OPENSSL_PREFIX')) ssl_prefix = detect_openssl(possible_openssl_prefixes) @@ -169,36 +178,63 @@ def detect_libb2(prefixes): library_dirs.append(os.path.join(ssl_prefix, 'lib')) -possible_lz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4', +possible_liblz4_prefixes = ['/usr', '/usr/local', '/usr/local/opt/lz4', '/usr/local/lz4', '/usr/local/borg', '/opt/local', '/opt/pkg', ] -if 
os.environ.get('BORG_LZ4_PREFIX'): - possible_lz4_prefixes.insert(0, os.environ.get('BORG_LZ4_PREFIX')) -lz4_prefix = detect_lz4(possible_lz4_prefixes) -if lz4_prefix: - include_dirs.append(os.path.join(lz4_prefix, 'include')) - library_dirs.append(os.path.join(lz4_prefix, 'lib')) -elif not on_rtd: - raise Exception('Unable to find LZ4 headers. (Looked here: {})'.format(', '.join(possible_lz4_prefixes))) +if os.environ.get('BORG_LIBLZ4_PREFIX'): + possible_liblz4_prefixes.insert(0, os.environ.get('BORG_LIBLZ4_PREFIX')) +liblz4_prefix = setup_lz4.lz4_system_prefix(possible_liblz4_prefixes) +if prefer_system_liblz4 and liblz4_prefix: + print('Detected and preferring liblz4 over bundled LZ4') + define_macros.append(('BORG_USE_LIBLZ4', 'YES')) + liblz4_system = True +else: + liblz4_system = False possible_libb2_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libb2', '/usr/local/libb2', '/usr/local/borg', '/opt/local', '/opt/pkg', ] if os.environ.get('BORG_LIBB2_PREFIX'): possible_libb2_prefixes.insert(0, os.environ.get('BORG_LIBB2_PREFIX')) -libb2_prefix = detect_libb2(possible_libb2_prefixes) -if libb2_prefix: +libb2_prefix = setup_b2.b2_system_prefix(possible_libb2_prefixes) +if prefer_system_libb2 and libb2_prefix: print('Detected and preferring libb2 over bundled BLAKE2') - include_dirs.append(os.path.join(libb2_prefix, 'include')) - library_dirs.append(os.path.join(libb2_prefix, 'lib')) - crypto_libraries.append('b2') define_macros.append(('BORG_USE_LIBB2', 'YES')) + libb2_system = True +else: + libb2_system = False + +possible_libzstd_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libzstd', '/usr/local/libzstd', + '/usr/local/borg', '/opt/local', '/opt/pkg', ] +if os.environ.get('BORG_LIBZSTD_PREFIX'): + possible_libzstd_prefixes.insert(0, os.environ.get('BORG_LIBZSTD_PREFIX')) +libzstd_prefix = setup_zstd.zstd_system_prefix(possible_libzstd_prefixes) +if prefer_system_libzstd and libzstd_prefix: + print('Detected and preferring libzstd over bundled ZSTD') + 
define_macros.append(('BORG_USE_LIBZSTD', 'YES')) + libzstd_system = True +else: + libzstd_system = False + +possible_libxxhash_prefixes = ['/usr', '/usr/local', '/usr/local/opt/libxxhash', '/usr/local/libxxhash', + '/usr/local/borg', '/opt/local', '/opt/pkg', ] +if os.environ.get('BORG_LIBXXHASH_PREFIX'): + possible_libxxhash_prefixes.insert(0, os.environ.get('BORG_LIBXXHASH_PREFIX')) +libxxhash_prefix = setup_xxhash.xxhash_system_prefix(possible_libxxhash_prefixes) +if prefer_system_libxxhash and libxxhash_prefix: + print('Detected and preferring libxxhash over bundled XXHASH') + define_macros.append(('BORG_USE_LIBXXHASH', 'YES')) + libxxhash_system = True +else: + libxxhash_system = False with open('README.rst', 'r') as fd: long_description = fd.read() + # remove header, but have one \n before first headline + start = long_description.find('What is BorgBackup?') + assert start >= 0 + long_description = '\n' + long_description[start:] # remove badges long_description = re.compile(r'^\.\. start-badges.*^\.\. end-badges', re.M | re.S).sub('', long_description) - # remove |substitutions| - long_description = re.compile(r'\|screencast\|').sub('', long_description) # remove unknown directives long_description = re.compile(r'^\.\. 
highlight:: \w+$', re.M).sub('', long_description) @@ -236,10 +272,13 @@ def run(self): # allows us to build docs without the C modules fully loaded during help generation from borg.archiver import Archiver parser = Archiver(prog='borg').build_parser() + # borgfs has a separate man page to satisfy debian's "every program from a package + # must have a man page" requirement, but it doesn't need a separate HTML docs page + #borgfs_parser = Archiver(prog='borgfs').build_parser() self.generate_level("", parser, Archiver) - def generate_level(self, prefix, parser, Archiver): + def generate_level(self, prefix, parser, Archiver, extra_choices=None): is_subcommand = False choices = {} for action in parser._actions: @@ -247,6 +286,8 @@ def generate_level(self, prefix, parser, Archiver): is_subcommand = True for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser + if extra_choices is not None: + choices.update(extra_choices) if prefix and not choices: return print('found commands: %s' % list(choices.keys())) @@ -499,12 +540,13 @@ def run(self): # allows us to build docs without the C modules fully loaded during help generation from borg.archiver import Archiver parser = Archiver(prog='borg').build_parser() + borgfs_parser = Archiver(prog='borgfs').build_parser() - self.generate_level('', parser, Archiver) + self.generate_level('', parser, Archiver, {'borgfs': borgfs_parser}) self.build_topic_pages(Archiver) self.build_intro_page() - def generate_level(self, prefix, parser, Archiver): + def generate_level(self, prefix, parser, Archiver, extra_choices=None): is_subcommand = False choices = {} for action in parser._actions: @@ -512,6 +554,8 @@ def generate_level(self, prefix, parser, Archiver): is_subcommand = True for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser + if extra_choices is not None: + choices.update(extra_choices) if prefix and not choices: return @@ -519,7 +563,10 @@ def generate_level(self, prefix, parser, Archiver): if 
command.startswith('debug') or command == 'help': continue - man_title = 'borg-' + command.replace(' ', '-') + if command == "borgfs": + man_title = command + else: + man_title = 'borg-' + command.replace(' ', '-') print('building man page', man_title + '(1)', file=sys.stderr) is_intermediary = self.generate_level(command + ' ', parser, Archiver) @@ -534,7 +581,10 @@ def generate_level(self, prefix, parser, Archiver): write('| borg', '[common options]', command, subcommand, '...') self.see_also.setdefault(command, []).append('%s-%s' % (command, subcommand)) else: - write('borg', '[common options]', command, end='') + if command == "borgfs": + write(command, end='') + else: + write('borg', '[common options]', command, end='') self.write_usage(write, parser) write('\n') @@ -635,6 +685,8 @@ def write_examples(self, write, command): examples = examples.replace(usage_include, '') examples = examples.replace('Examples\n~~~~~~~~', '') examples = examples.replace('Miscellaneous Help\n------------------', '') + examples = examples.replace('``docs/misc/prune-example.txt``:', '``docs/misc/prune-example.txt``.') + examples = examples.replace('.. highlight:: none\n', '') # we don't support highlight examples = re.sub('^(~+)$', lambda matches: '+' * len(matches.group(0)), examples, flags=re.MULTILINE) examples = examples.strip() if examples: @@ -651,9 +703,16 @@ def write_see_also(self, write, man_title): def gen_man_page(self, name, rst): from docutils.writers import manpage from docutils.core import publish_string + from docutils.nodes import inline + from docutils.parsers.rst import roles + + def issue(name, rawtext, text, lineno, inliner, options={}, content=[]): + return [inline(rawtext, '#' + text)], [] + + roles.register_local_role('issue', issue) # We give the source_path so that docutils can find relative includes # as-if the document where located in the docs/ directory. 
- man_page = publish_string(source=rst, source_path='docs/virtmanpage.rst', writer=manpage.Writer()) + man_page = publish_string(source=rst, source_path='docs/%s.rst' % name, writer=manpage.Writer()) with open('docs/man/%s.1' % name, 'wb') as fd: fd.write(man_page) @@ -708,12 +767,23 @@ def rm(file): pass -class Clean(clean): +class Clean(Command): + user_options = [] + + def initialize_options(self): + pass + + def finalize_options(self): + pass + def run(self): - super().run() - for source in cython_sources: + for source in cython_c_sources: genc = source.replace('.pyx', '.c') rm(genc) + for source in cython_cpp_sources: + gencpp = source.replace('.pyx', '.cpp') + rm(gencpp) + for source in cython_c_sources + cython_cpp_sources: compiled_glob = source.replace('.pyx', '.cpython*') for compiled in sorted(glob(compiled_glob)): rm(compiled) @@ -723,25 +793,61 @@ def run(self): 'build_usage': build_usage, 'build_man': build_man, 'sdist': Sdist, - 'clean': Clean, + 'clean2': Clean, } ext_modules = [] if not on_rtd: - ext_modules += [ - Extension('borg.compress', [compress_source], libraries=['lz4'], include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros), - Extension('borg.crypto.low_level', [crypto_ll_source], libraries=crypto_libraries, include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros), - Extension('borg.hashindex', [hashindex_source]), - Extension('borg.item', [item_source]), - Extension('borg.chunker', [chunker_source]), - Extension('borg.algorithms.checksums', [checksums_source]), + compress_ext_kwargs = dict(sources=[compress_source], include_dirs=include_dirs, library_dirs=library_dirs, + define_macros=define_macros) + compress_ext_kwargs = setup_lz4.lz4_ext_kwargs(bundled_path='src/borg/algorithms/lz4', + system_prefix=liblz4_prefix, system=liblz4_system, + **compress_ext_kwargs) + compress_ext_kwargs = setup_zstd.zstd_ext_kwargs(bundled_path='src/borg/algorithms/zstd', + system_prefix=libzstd_prefix, 
system=libzstd_system, + multithreaded=False, legacy=False, **compress_ext_kwargs) + crypto_ext_kwargs = dict(sources=[crypto_ll_source], libraries=['crypto'], + include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros) + crypto_ext_kwargs = setup_b2.b2_ext_kwargs(bundled_path='src/borg/algorithms/blake2', + system_prefix=libb2_prefix, system=libb2_system, + **crypto_ext_kwargs) + + crypto_ext_kwargs = setup_xxhash.xxhash_ext_kwargs(bundled_path='src/borg/algorithms/xxh64', + system_prefix=libxxhash_prefix, system=libxxhash_system, + **crypto_ext_kwargs) + + msgpack_endian = '__BIG_ENDIAN__' if (sys.byteorder == 'big') else '__LITTLE_ENDIAN__' + msgpack_macros = [(msgpack_endian, '1')] + msgpack_packer_ext_kwargs = dict( + sources=[msgpack_packer_source], + include_dirs=include_dirs, + library_dirs=library_dirs, + define_macros=msgpack_macros, + language='c++', + ) + msgpack_unpacker_ext_kwargs = dict( + sources=[msgpack_unpacker_source], + include_dirs=include_dirs, + library_dirs=library_dirs, + define_macros=msgpack_macros, + language='c++', + ) -] + ext_modules += [ + Extension('borg.algorithms.msgpack._packer', **msgpack_packer_ext_kwargs), + Extension('borg.algorithms.msgpack._unpacker', **msgpack_unpacker_ext_kwargs), + Extension('borg.compress', **compress_ext_kwargs), + Extension('borg.crypto.low_level', **crypto_ext_kwargs), + Extension('borg.hashindex', [hashindex_source]), + Extension('borg.item', [item_source]), + Extension('borg.chunker', [chunker_source]), + Extension('borg.algorithms.checksums', [checksums_source]), + ] if not sys.platform.startswith(('win32', )): ext_modules.append(Extension('borg.platform.posix', [platform_posix_source])) - if sys.platform == 'linux': ext_modules.append(Extension('borg.platform.linux', [platform_linux_source], libraries=['acl'])) + ext_modules.append(Extension('borg.platform.syncfilerange', [platform_syncfilerange_source])) elif sys.platform.startswith('freebsd'): 
ext_modules.append(Extension('borg.platform.freebsd', [platform_freebsd_source])) elif sys.platform == 'darwin': @@ -771,9 +877,11 @@ def run(self): 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'Topic :: Security :: Cryptography', 'Topic :: System :: Archiving :: Backup', ], @@ -786,10 +894,12 @@ def run(self): 'borgfs = borg.archiver:main', ] }, + # See also the MANIFEST.in file. + # We want to install all the files in the package directories... include_package_data=True, - package_data={ - 'borg': ['paperkey.html'], - 'borg.testsuite': ['attic.tar.gz'], + # ...except the source files which have been compiled (C extensions): + exclude_package_data={ + '': ['*.c', '*.h', '*.pyx', ], }, cmdclass=cmdclass, ext_modules=ext_modules, diff --git a/setup_b2.py b/setup_b2.py new file mode 100644 index 0000000000..e775d9b721 --- /dev/null +++ b/setup_b2.py @@ -0,0 +1,72 @@ +# Support code for building a C extension with blake2 files +# +# Copyright (c) 2016-present, Gregory Szorc (original code for zstd) +# 2017-present, Thomas Waldmann (mods to make it more generic, code for blake2) +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. 
+ +import os + +# b2 files, structure as seen in BLAKE2 (reference implementation) project repository: + +b2_sources = [ + 'ref/blake2b-ref.c', +] + +b2_includes = [ + 'ref', +] + + +def b2_system_prefix(prefixes): + for prefix in prefixes: + filename = os.path.join(prefix, 'include', 'blake2.h') + if os.path.exists(filename): + with open(filename, 'rb') as fd: + if b'blake2b_init' in fd.read(): + return prefix + + +def b2_ext_kwargs(bundled_path, system_prefix=None, system=False, **kwargs): + """amend kwargs with b2 stuff for a distutils.extension.Extension initialization. + + bundled_path: relative (to this file) path to the bundled library source code files + system_prefix: where the system-installed library can be found + system: True: use the system-installed shared library, False: use the bundled library code + kwargs: distutils.extension.Extension kwargs that should be amended + returns: amended kwargs + """ + def multi_join(paths, *path_segments): + """apply os.path.join on a list of paths""" + return [os.path.join(*(path_segments + (path, ))) for path in paths] + + use_system = system and system_prefix is not None + + sources = kwargs.get('sources', []) + if not use_system: + sources += multi_join(b2_sources, bundled_path) + + include_dirs = kwargs.get('include_dirs', []) + if use_system: + include_dirs += multi_join(['include'], system_prefix) + else: + include_dirs += multi_join(b2_includes, bundled_path) + + library_dirs = kwargs.get('library_dirs', []) + if use_system: + library_dirs += multi_join(['lib'], system_prefix) + + libraries = kwargs.get('libraries', []) + if use_system: + libraries += ['b2', ] + + extra_compile_args = kwargs.get('extra_compile_args', []) + if not use_system: + extra_compile_args += [] # not used yet + + ret = dict(**kwargs) + ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, + include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) + return ret diff --git a/setup_lz4.py 
b/setup_lz4.py new file mode 100644 index 0000000000..e6d0ddb410 --- /dev/null +++ b/setup_lz4.py @@ -0,0 +1,72 @@ +# Support code for building a C extension with lz4 files +# +# Copyright (c) 2016-present, Gregory Szorc (original code for zstd) +# 2017-present, Thomas Waldmann (mods to make it more generic, code for lz4) +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +import os + +# lz4 files, structure as seen in lz4 project repository: + +lz4_sources = [ + 'lib/lz4.c', +] + +lz4_includes = [ + 'lib', +] + + +def lz4_system_prefix(prefixes): + for prefix in prefixes: + filename = os.path.join(prefix, 'include', 'lz4.h') + if os.path.exists(filename): + with open(filename, 'rb') as fd: + if b'LZ4_compress_default' in fd.read(): # requires lz4 >= 1.7.0 (r129) + return prefix + + +def lz4_ext_kwargs(bundled_path, system_prefix=None, system=False, **kwargs): + """amend kwargs with lz4 stuff for a distutils.extension.Extension initialization. 
+ + bundled_path: relative (to this file) path to the bundled library source code files + system_prefix: where the system-installed library can be found + system: True: use the system-installed shared library, False: use the bundled library code + kwargs: distutils.extension.Extension kwargs that should be amended + returns: amended kwargs + """ + def multi_join(paths, *path_segments): + """apply os.path.join on a list of paths""" + return [os.path.join(*(path_segments + (path, ))) for path in paths] + + use_system = system and system_prefix is not None + + sources = kwargs.get('sources', []) + if not use_system: + sources += multi_join(lz4_sources, bundled_path) + + include_dirs = kwargs.get('include_dirs', []) + if use_system: + include_dirs += multi_join(['include'], system_prefix) + else: + include_dirs += multi_join(lz4_includes, bundled_path) + + library_dirs = kwargs.get('library_dirs', []) + if use_system: + library_dirs += multi_join(['lib'], system_prefix) + + libraries = kwargs.get('libraries', []) + if use_system: + libraries += ['lz4', ] + + extra_compile_args = kwargs.get('extra_compile_args', []) + if not use_system: + extra_compile_args += [] # not used yet + + ret = dict(**kwargs) + ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, + include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) + return ret diff --git a/setup_xxhash.py b/setup_xxhash.py new file mode 100644 index 0000000000..244e2323b1 --- /dev/null +++ b/setup_xxhash.py @@ -0,0 +1,73 @@ +# Support code for building a C extension with xxhash files +# +# Copyright (c) 2016-present, Gregory Szorc (original code for zstd) +# 2017-present, Thomas Waldmann (mods to make it more generic, code for blake2) +# 2020-present, Gianfranco Costamagna (code for xxhash) +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. 
+ +import os + +# xxhash files, structure as seen in XXHASH (reference implementation) project repository: + +xxhash_sources = [ + 'xxhash.c', +] + +xxhash_includes = [ + '.', +] + + +def xxhash_system_prefix(prefixes): + for prefix in prefixes: + filename = os.path.join(prefix, 'include', 'xxhash.h') + if os.path.exists(filename): + with open(filename, 'rb') as fd: + if b'XXH64_digest' in fd.read(): + return prefix + + +def xxhash_ext_kwargs(bundled_path, system_prefix=None, system=False, **kwargs): + """amend kwargs with xxhash stuff for a distutils.extension.Extension initialization. + + bundled_path: relative (to this file) path to the bundled library source code files + system_prefix: where the system-installed library can be found + system: True: use the system-installed shared library, False: use the bundled library code + kwargs: distutils.extension.Extension kwargs that should be amended + returns: amended kwargs + """ + def multi_join(paths, *path_segments): + """apply os.path.join on a list of paths""" + return [os.path.join(*(path_segments + (path, ))) for path in paths] + + use_system = system and system_prefix is not None + + sources = kwargs.get('sources', []) + if not use_system: + sources += multi_join(xxhash_sources, bundled_path) + + include_dirs = kwargs.get('include_dirs', []) + if use_system: + include_dirs += multi_join(['include'], system_prefix) + else: + include_dirs += multi_join(xxhash_includes, bundled_path) + + library_dirs = kwargs.get('library_dirs', []) + if use_system: + library_dirs += multi_join(['lib'], system_prefix) + + libraries = kwargs.get('libraries', []) + if use_system: + libraries += ['xxhash', ] + + extra_compile_args = kwargs.get('extra_compile_args', []) + if not use_system: + extra_compile_args += [] # not used yet + + ret = dict(**kwargs) + ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, + include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) + return ret diff --git 
a/setup_zstd.py b/setup_zstd.py new file mode 100644 index 0000000000..0f69766d17 --- /dev/null +++ b/setup_zstd.py @@ -0,0 +1,133 @@ +# Support code for building a C extension with zstd files +# +# Copyright (c) 2016-present, Gregory Szorc +# 2017-present, Thomas Waldmann (mods to make it more generic) +# All rights reserved. +# +# This software may be modified and distributed under the terms +# of the BSD license. See the LICENSE file for details. + +import os + +# zstd files, structure as seen in zstd project repository: + +zstd_sources = [ + 'lib/common/debug.c', + 'lib/common/entropy_common.c', + 'lib/common/error_private.c', + 'lib/common/fse_decompress.c', + 'lib/common/pool.c', + 'lib/common/threading.c', + 'lib/common/xxhash.c', + 'lib/common/zstd_common.c', + 'lib/compress/fse_compress.c', + 'lib/compress/hist.c', + 'lib/compress/huf_compress.c', + 'lib/compress/zstd_compress.c', + 'lib/compress/zstd_compress_literals.c', + 'lib/compress/zstd_compress_sequences.c', + 'lib/compress/zstd_compress_superblock.c', + 'lib/compress/zstd_double_fast.c', + 'lib/compress/zstd_fast.c', + 'lib/compress/zstd_lazy.c', + 'lib/compress/zstd_ldm.c', + 'lib/compress/zstd_opt.c', + 'lib/compress/zstdmt_compress.c', + 'lib/decompress/huf_decompress.c', + 'lib/decompress/zstd_ddict.c', + 'lib/decompress/zstd_decompress.c', + 'lib/decompress/zstd_decompress_block.c', + 'lib/dictBuilder/cover.c', + 'lib/dictBuilder/divsufsort.c', + 'lib/dictBuilder/fastcover.c', + 'lib/dictBuilder/zdict.c', +] + +zstd_sources_legacy = [ + 'lib/deprecated/zbuff_common.c', + 'lib/deprecated/zbuff_compress.c', + 'lib/deprecated/zbuff_decompress.c', + 'lib/legacy/zstd_v01.c', + 'lib/legacy/zstd_v02.c', + 'lib/legacy/zstd_v03.c', + 'lib/legacy/zstd_v04.c', + 'lib/legacy/zstd_v05.c', + 'lib/legacy/zstd_v06.c', + 'lib/legacy/zstd_v07.c', +] + +zstd_includes = [ + 'lib', + 'lib/common', + 'lib/compress', + 'lib/decompress', + 'lib/dictBuilder', +] + +zstd_includes_legacy = [ + 'lib/deprecated', + 
'lib/legacy', +] + + +def zstd_system_prefix(prefixes): + for prefix in prefixes: + filename = os.path.join(prefix, 'include', 'zstd.h') + if os.path.exists(filename): + with open(filename, 'rb') as fd: + if b'ZSTD_getFrameContentSize' in fd.read(): # checks for zstd >= 1.3.0 + return prefix + + +def zstd_ext_kwargs(bundled_path, system_prefix=None, system=False, multithreaded=False, legacy=False, **kwargs): + """amend kwargs with zstd suff for a distutils.extension.Extension initialization. + + bundled_path: relative (to this file) path to the bundled library source code files + system_prefix: where the system-installed library can be found + system: True: use the system-installed shared library, False: use the bundled library code + multithreaded: True: define ZSTD_MULTITHREAD + legacy: include legacy API support + kwargs: distutils.extension.Extension kwargs that should be amended + returns: amended kwargs + """ + def multi_join(paths, *path_segments): + """apply os.path.join on a list of paths""" + return [os.path.join(*(path_segments + (path, ))) for path in paths] + + use_system = system and system_prefix is not None + + sources = kwargs.get('sources', []) + if not use_system: + sources += multi_join(zstd_sources, bundled_path) + if legacy: + sources += multi_join(zstd_sources_legacy, bundled_path) + + include_dirs = kwargs.get('include_dirs', []) + if use_system: + include_dirs += multi_join(['include'], system_prefix) + else: + include_dirs += multi_join(zstd_includes, bundled_path) + if legacy: + include_dirs += multi_join(zstd_includes_legacy, bundled_path) + + library_dirs = kwargs.get('library_dirs', []) + if use_system: + library_dirs += multi_join(['lib'], system_prefix) + + libraries = kwargs.get('libraries', []) + if use_system: + libraries += ['zstd', ] + + extra_compile_args = kwargs.get('extra_compile_args', []) + if multithreaded: + extra_compile_args += ['-DZSTD_MULTITHREAD', ] + if not use_system: + extra_compile_args += 
['-DZSTDLIB_VISIBILITY=', '-DZDICTLIB_VISIBILITY=', '-DZSTDERRORLIB_VISIBILITY=', ] + # '-fvisibility=hidden' does not work, doesn't find PyInit_compress then + if legacy: + extra_compile_args += ['-DZSTD_LEGACY_SUPPORT=1', ] + + ret = dict(**kwargs) + ret.update(dict(sources=sources, extra_compile_args=extra_compile_args, + include_dirs=include_dirs, library_dirs=library_dirs, libraries=libraries)) + return ret diff --git a/src/borg/__init__.py b/src/borg/__init__.py index c2b201860a..9f13c7d641 100644 --- a/src/borg/__init__.py +++ b/src/borg/__init__.py @@ -1,11 +1,12 @@ -from distutils.version import LooseVersion +from packaging.version import parse as parse_version # IMPORTANT keep imports from borg here to a minimum because our testsuite depends on -# beeing able to import borg.constants and then monkey patching borg.constants.PBKDF2_ITERATIONS +# being able to import borg.constants and then monkey patching borg.constants.PBKDF2_ITERATIONS from ._version import version as __version__ -__version_tuple__ = tuple(LooseVersion(__version__).version[:3]) +_v = parse_version(__version__) +__version_tuple__ = _v._version.release # assert that all semver components are integers # this is mainly to show errors when people repackage poorly diff --git a/src/borg/_chunker.c b/src/borg/_chunker.c index b72f6bd8b1..4add32ac3d 100644 --- a/src/borg/_chunker.c +++ b/src/borg/_chunker.c @@ -63,7 +63,7 @@ static uint32_t table_base[] = 0xc5ae37bb, 0xa76ce12a, 0x8150d8f3, 0x2ec29218, 0xa35f0984, 0x48c0647e, 0x0b5ff98c, 0x71893f7b }; -#define BARREL_SHIFT(v, shift) ( ((v) << shift) | ((v) >> ((32 - shift) & 0x1f)) ) +#define BARREL_SHIFT(v, shift) ( ((v) << (shift)) | ((v) >> ((32 - (shift)) & 0x1f)) ) size_t pagemask; diff --git a/src/borg/_hashindex.c b/src/borg/_hashindex.c index 9fbd1ebda1..438c49906c 100644 --- a/src/borg/_hashindex.c +++ b/src/borg/_hashindex.c @@ -71,7 +71,7 @@ static int hash_sizes[] = { #define EMPTY _htole32(0xffffffff) #define DELETED 
_htole32(0xfffffffe) -#define BUCKET_ADDR(index, idx) (index->buckets + (idx * index->bucket_size)) +#define BUCKET_ADDR(index, idx) (index->buckets + ((idx) * index->bucket_size)) #define BUCKET_MATCHES_KEY(index, idx, key) (memcmp(key, BUCKET_ADDR(index, idx), index->key_size) == 0) @@ -562,6 +562,16 @@ hashindex_set(HashIndex *index, const void *key, const void *value) if(!hashindex_resize(index, index->num_buckets)) { return 0; } + /* we have just built a fresh hashtable and removed all tombstones, + * so we only have EMPTY or USED buckets, but no DELETED ones any more. + */ + idx = start_idx = hashindex_index(index, key); + while(!BUCKET_IS_EMPTY(index, idx)) { + idx++; + if (idx >= index->num_buckets){ + idx -= index->num_buckets; + } + } } } ptr = BUCKET_ADDR(index, idx); diff --git a/src/borg/algorithms/blake2-libselect.h b/src/borg/algorithms/blake2-libselect.h index 5486400ebf..5cd970d88d 100644 --- a/src/borg/algorithms/blake2-libselect.h +++ b/src/borg/algorithms/blake2-libselect.h @@ -1,5 +1,5 @@ #ifdef BORG_USE_LIBB2 #include #else -#include "blake2/blake2b-ref.c" +#include "blake2/ref/blake2.h" #endif diff --git a/src/borg/algorithms/blake2/blake2-impl.h b/src/borg/algorithms/blake2/ref/blake2-impl.h similarity index 99% rename from src/borg/algorithms/blake2/blake2-impl.h rename to src/borg/algorithms/blake2/ref/blake2-impl.h index ad9089eee7..5dff7fc7a3 100644 --- a/src/borg/algorithms/blake2/blake2-impl.h +++ b/src/borg/algorithms/blake2/ref/blake2-impl.h @@ -1,14 +1,14 @@ /* BLAKE2 reference source code package - reference C implementations - + Copyright 2012, Samuel Neves . You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. 
The terms of these licenses can be found at: - + - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 - + More information about the BLAKE2 hash function can be found at https://blake2.net. */ @@ -158,4 +158,3 @@ static BLAKE2_INLINE void secure_zero_memory(void *v, size_t n) } #endif - diff --git a/src/borg/algorithms/blake2/blake2.h b/src/borg/algorithms/blake2/ref/blake2.h similarity index 99% rename from src/borg/algorithms/blake2/blake2.h rename to src/borg/algorithms/blake2/ref/blake2.h index 6420c5367a..ad62f260e7 100644 --- a/src/borg/algorithms/blake2/blake2.h +++ b/src/borg/algorithms/blake2/ref/blake2.h @@ -193,4 +193,3 @@ extern "C" { #endif #endif - diff --git a/src/borg/algorithms/blake2/blake2b-ref.c b/src/borg/algorithms/blake2/ref/blake2b-ref.c similarity index 99% rename from src/borg/algorithms/blake2/blake2b-ref.c rename to src/borg/algorithms/blake2/ref/blake2b-ref.c index 0d36fb0deb..cd38b1ba00 100644 --- a/src/borg/algorithms/blake2/blake2b-ref.c +++ b/src/borg/algorithms/blake2/ref/blake2b-ref.c @@ -1,14 +1,14 @@ /* BLAKE2 reference source code package - reference C implementations - + Copyright 2012, Samuel Neves . You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - + - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 - + More information about the BLAKE2 hash function can be found at https://blake2.net. 
*/ @@ -377,4 +377,3 @@ int main( void ) return -1; } #endif - diff --git a/src/borg/algorithms/checksums.pyx b/src/borg/algorithms/checksums.pyx index 6645dd0fe2..6d35c2209b 100644 --- a/src/borg/algorithms/checksums.pyx +++ b/src/borg/algorithms/checksums.pyx @@ -1,3 +1,5 @@ +# cython: language_level=3 + from ..helpers import bin_to_hex from libc.stdint cimport uint32_t @@ -12,7 +14,7 @@ cdef extern from "crc32_dispatch.c": int _have_clmul "have_clmul"() -cdef extern from "xxh64/xxhash.c": +cdef extern from "../algorithms/xxhash-libselect.h": ctypedef struct XXH64_canonical_t: char digest[8] diff --git a/src/borg/algorithms/crc32_slice_by_8.c b/src/borg/algorithms/crc32_slice_by_8.c index dcfd8b8f97..3ab0c84090 100644 --- a/src/borg/algorithms/crc32_slice_by_8.c +++ b/src/borg/algorithms/crc32_slice_by_8.c @@ -330,12 +330,26 @@ const uint32_t Crc32Lookup[8][256] = uint32_t crc32_slice_by_8(const void* data, size_t length, uint32_t previousCrc32) { uint32_t crc = ~previousCrc32; // same as previousCrc32 ^ 0xFFFFFFFF - const uint32_t* current = (const uint32_t*) data; + + const uint32_t* current; + const uint8_t* currentChar = (const uint8_t*) data; // enabling optimization (at least -O2) automatically unrolls the inner for-loop const size_t Unroll = 4; const size_t BytesAtOnce = 8 * Unroll; - const uint8_t* currentChar; + + // wanted: 32 bit / 4 Byte alignment, compute leading, unaligned bytes length + uintptr_t unaligned_length = (4 - (((uintptr_t) currentChar) & 3)) & 3; + // process unaligned bytes, if any (standard algorithm) + while ((length != 0) && (unaligned_length != 0)) + { + crc = (crc >> 8) ^ Crc32Lookup[0][(crc & 0xFF) ^ *currentChar++]; + length--; + unaligned_length--; + } + + // pointer points to 32bit aligned address now + current = (const uint32_t*) currentChar; // process 4x eight bytes at once (Slicing-by-8) while (length >= BytesAtOnce) diff --git a/src/borg/algorithms/lz4-libselect.h b/src/borg/algorithms/lz4-libselect.h new file mode 100644 
index 0000000000..85c544d125 --- /dev/null +++ b/src/borg/algorithms/lz4-libselect.h @@ -0,0 +1,5 @@ +#ifdef BORG_USE_LIBLZ4 +#include +#else +#include "lz4/lib/lz4.h" +#endif diff --git a/src/borg/algorithms/lz4/lib/lz4.c b/src/borg/algorithms/lz4/lib/lz4.c new file mode 100644 index 0000000000..9808d70aed --- /dev/null +++ b/src/borg/algorithms/lz4/lib/lz4.c @@ -0,0 +1,2398 @@ +/* + LZ4 - Fast LZ compression algorithm + Copyright (C) 2011-present, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ + +/*-************************************ +* Tuning parameters +**************************************/ +/* + * LZ4_HEAPMODE : + * Select how default compression functions will allocate memory for their hash table, + * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()). + */ +#ifndef LZ4_HEAPMODE +# define LZ4_HEAPMODE 0 +#endif + +/* + * ACCELERATION_DEFAULT : + * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0 + */ +#define ACCELERATION_DEFAULT 1 + + +/*-************************************ +* CPU Feature Detection +**************************************/ +/* LZ4_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets which assembly generation depends on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */ +# if defined(__GNUC__) && \ + ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \ + || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define LZ4_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__) +# define LZ4_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/* + * LZ4_FORCE_SW_BITCOUNT + * Define this parameter if your target system or compiler does not support hardware bit count + */ +#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */ +# define LZ4_FORCE_SW_BITCOUNT +#endif + + + +/*-************************************ +* Dependency +**************************************/ +/* + * LZ4_SRC_INCLUDED: + * Amalgamation flag, whether lz4.c is included + */ +#ifndef LZ4_SRC_INCLUDED +# define LZ4_SRC_INCLUDED 1 +#endif + +#ifndef LZ4_STATIC_LINKING_ONLY +#define LZ4_STATIC_LINKING_ONLY +#endif + +#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS +#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */ +#endif + +#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */ +#include "lz4.h" +/* see also "memory routines" below */ + + +/*-************************************ +* Compiler Options +**************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# include +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */ +#endif /* _MSC_VER */ + +#ifndef LZ4_FORCE_INLINE +# ifdef _MSC_VER /* Visual Studio */ +# define LZ4_FORCE_INLINE static __forceinline +# else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define LZ4_FORCE_INLINE static inline 
__attribute__((always_inline)) +# else +# define LZ4_FORCE_INLINE static inline +# endif +# else +# define LZ4_FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +# endif /* _MSC_VER */ +#endif /* LZ4_FORCE_INLINE */ + +/* LZ4_FORCE_O2_GCC_PPC64LE and LZ4_FORCE_O2_INLINE_GCC_PPC64LE + * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8, + * together with a simple 8-byte copy loop as a fall-back path. + * However, this optimization hurts the decompression speed by >30%, + * because the execution does not go to the optimized loop + * for typical compressible data, and all of the preamble checks + * before going to the fall-back path become useless overhead. + * This optimization happens only with the -O3 flag, and -O2 generates + * a simple 8-byte copy loop. + * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8 + * functions are annotated with __attribute__((optimize("O2"))), + * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute + * of LZ4_wildCopy8 does not affect the compression speed. 
+ */ +#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__) +# define LZ4_FORCE_O2_GCC_PPC64LE __attribute__((optimize("O2"))) +# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE __attribute__((optimize("O2"))) LZ4_FORCE_INLINE +#else +# define LZ4_FORCE_O2_GCC_PPC64LE +# define LZ4_FORCE_O2_INLINE_GCC_PPC64LE static +#endif + +#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__) +# define expect(expr,value) (__builtin_expect ((expr),(value)) ) +#else +# define expect(expr,value) (expr) +#endif + +#ifndef likely +#define likely(expr) expect((expr) != 0, 1) +#endif +#ifndef unlikely +#define unlikely(expr) expect((expr) != 0, 0) +#endif + + +/*-************************************ +* Memory routines +**************************************/ +#include /* malloc, calloc, free */ +#define ALLOC(s) malloc(s) +#define ALLOC_AND_ZERO(s) calloc(1,s) +#define FREEMEM(p) free(p) +#include /* memset, memcpy */ +#define MEM_INIT(p,v,s) memset((p),(v),(s)) + + +/*-************************************ +* Common Constants +**************************************/ +#define MINMATCH 4 + +#define WILDCOPYLENGTH 8 +#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ +#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */ +#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */ +#define FASTLOOP_SAFE_DISTANCE 64 +static const int LZ4_minLength = (MFLIMIT+1); + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define LZ4_DISTANCE_ABSOLUTE_MAX 65535 +#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */ +# error "LZ4_DISTANCE_MAX is too big : must be <= 65535" +#endif + +#define ML_BITS 4 +#define ML_MASK ((1U<=1) +# include +#else +# ifndef assert +# define assert(condition) ((void)0) +# 
endif +#endif + +#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */ + +#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) +# include +static int g_debuglog_enable = 1; +# define DEBUGLOG(l, ...) { \ + if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \ + fprintf(stderr, __FILE__ ": "); \ + fprintf(stderr, __VA_ARGS__); \ + fprintf(stderr, " \n"); \ + } } +#else +# define DEBUGLOG(l, ...) {} /* disabled */ +#endif + + +/*-************************************ +* Types +**************************************/ +#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef uintptr_t uptrval; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; + typedef size_t uptrval; /* generally true, except OpenVMS-64 */ +#endif + +#if defined(__x86_64__) + typedef U64 reg_t; /* 64-bits in x32 mode */ +#else + typedef size_t reg_t; /* 32-bits in x32 mode */ +#endif + +typedef enum { + notLimited = 0, + limitedOutput = 1, + fillOutput = 2 +} limitedOutput_directive; + + +/*-************************************ +* Reading and writing into memory +**************************************/ +static unsigned LZ4_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + + +#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2) +/* lie to the compiler about data alignment; use with caution */ + +static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; } +static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; } +static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; } + +static void LZ4_write16(void* memPtr, U16 
value) { *(U16*)memPtr = value; } +static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } + +#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign; + +static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; } + +static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } +static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; } + +#else /* safe and portable access using memcpy() */ + +static U16 LZ4_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U32 LZ4_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static reg_t LZ4_read_ARCH(const void* memPtr) +{ + reg_t val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static void LZ4_write16(void* memPtr, U16 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +static void LZ4_write32(void* memPtr, U32 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* LZ4_FORCE_MEMORY_ACCESS */ + + +static U16 LZ4_readLE16(const void* memPtr) +{ + if (LZ4_isLittleEndian()) { + return LZ4_read16(memPtr); + } else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)((U16)p[0] + (p[1]<<8)); + } +} + +static void LZ4_writeLE16(void* memPtr, U16 value) +{ + if (LZ4_isLittleEndian()) { + LZ4_write16(memPtr, value); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE) value; + p[1] = (BYTE)(value>>8); + } +} + +/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */ 
+LZ4_FORCE_O2_INLINE_GCC_PPC64LE +void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd) +{ + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; + + do { memcpy(d,s,8); d+=8; s+=8; } while (d= 16. */ +LZ4_FORCE_O2_INLINE_GCC_PPC64LE void +LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd) +{ + BYTE* d = (BYTE*)dstPtr; + const BYTE* s = (const BYTE*)srcPtr; + BYTE* const e = (BYTE*)dstEnd; + + do { memcpy(d,s,16); memcpy(d+16,s+16,16); d+=32; s+=32; } while (d= dstPtr + MINMATCH + * - there is at least 8 bytes available to write after dstEnd */ +LZ4_FORCE_O2_INLINE_GCC_PPC64LE void +LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset) +{ + BYTE v[8]; + + assert(dstEnd >= dstPtr + MINMATCH); + LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */ + + switch(offset) { + case 1: + memset(v, *srcPtr, 8); + break; + case 2: + memcpy(v, srcPtr, 2); + memcpy(&v[2], srcPtr, 2); + memcpy(&v[4], &v[0], 4); + break; + case 4: + memcpy(v, srcPtr, 4); + memcpy(&v[4], srcPtr, 4); + break; + default: + LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset); + return; + } + + memcpy(dstPtr, v, 8); + dstPtr += 8; + while (dstPtr < dstEnd) { + memcpy(dstPtr, v, 8); + dstPtr += 8; + } +} +#endif + + +/*-************************************ +* Common functions +**************************************/ +static unsigned LZ4_NbCommonBytes (reg_t val) +{ + if (LZ4_isLittleEndian()) { + if (sizeof(val)==8) { +# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanForward64( &r, (U64)val ); + return (int)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctzll((U64)val) >> 3; +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, + 0, 3, 1, 3, 1, 4, 2, 7, + 0, 2, 3, 6, 1, 5, 3, 5, + 1, 3, 4, 4, 2, 5, 
6, 7, + 7, 0, 1, 2, 3, 3, 4, 6, + 2, 6, 5, 5, 3, 4, 5, 6, + 7, 1, 2, 4, 6, 4, 4, 5, + 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else /* 32 bits */ { +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r; + _BitScanForward( &r, (U32)val ); + return (int)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_ctz((U32)val) >> 3; +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, + 3, 2, 2, 1, 3, 2, 0, 1, + 3, 3, 1, 2, 2, 2, 2, 0, + 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else /* Big Endian CPU */ { + if (sizeof(val)==8) { /* 64-bits */ +# if defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clzll((U64)val) >> 3; +# else + static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits. + Just to avoid some static analyzer complaining about shift by 32 on 32-bits target. + Note that this code path is never triggered in 32-bits mode. 
*/ + unsigned r; + if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else /* 32 bits */ { +# if defined(_MSC_VER) && !defined(LZ4_FORCE_SW_BITCOUNT) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif (defined(__clang__) || (defined(__GNUC__) && (__GNUC__>=3))) && !defined(LZ4_FORCE_SW_BITCOUNT) + return (unsigned)__builtin_clz((U32)val) >> 3; +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } + } +} + +#define STEPSIZE sizeof(reg_t) +LZ4_FORCE_INLINE +unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit) +{ + const BYTE* const pStart = pIn; + + if (likely(pIn < pInLimit-(STEPSIZE-1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { + pIn+=STEPSIZE; pMatch+=STEPSIZE; + } else { + return LZ4_NbCommonBytes(diff); + } } + + while (likely(pIn < pInLimit-(STEPSIZE-1))) { + reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn); + if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; } + pIn += LZ4_NbCommonBytes(diff); + return (unsigned)(pIn - pStart); + } + + if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn compression run slower on incompressible data */ + + +/*-************************************ +* Local Structures and types +**************************************/ +typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t; + +/** + * This enum distinguishes several different modes of accessing previous + * content in the stream. + * + * - noDict : There is no preceding content. 
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob + * blob being compressed are valid and refer to the preceding + * content (of length ctx->dictSize), which is available + * contiguously preceding in memory the content currently + * being compressed. + * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere + * else in memory, starting at ctx->dictionary with length + * ctx->dictSize. + * - usingDictCtx : Like usingExtDict, but everything concerning the preceding + * content is in a separate context, pointed to by + * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table + * entries in the current context that refer to positions + * preceding the beginning of the current compression are + * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx + * ->dictSize describe the location and size of the preceding + * content, and matches are found by looking in the ctx + * ->dictCtx->hashTable. + */ +typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive; +typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; + + +/*-************************************ +* Local Utils +**************************************/ +int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; } +const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; } +int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } +int LZ4_sizeofState() { return LZ4_STREAMSIZE; } + + +/*-************************************ +* Internal Definitions used in Tests +**************************************/ +#if defined (__cplusplus) +extern "C" { +#endif + +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize); + +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize); + +#if defined (__cplusplus) +} +#endif + +/*-****************************** +* Compression 
functions +********************************/ +static U32 LZ4_hash4(U32 sequence, tableType_t const tableType) +{ + if (tableType == byU16) + return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1))); + else + return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG)); +} + +static U32 LZ4_hash5(U64 sequence, tableType_t const tableType) +{ + const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG; + if (LZ4_isLittleEndian()) { + const U64 prime5bytes = 889523592379ULL; + return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog)); + } else { + const U64 prime8bytes = 11400714785074694791ULL; + return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog)); + } +} + +LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType) +{ + if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType); + return LZ4_hash4(LZ4_read32(p), tableType); +} + +static void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: { /* illegal! */ assert(0); return; } + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; } + } +} + +static void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType) +{ + switch (tableType) + { + default: /* fallthrough */ + case clearedTable: /* fallthrough */ + case byPtr: { /* illegal! 
*/ assert(0); return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; } + case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; } + } +} + +static void LZ4_putPositionOnHash(const BYTE* p, U32 h, + void* tableBase, tableType_t const tableType, + const BYTE* srcBase) +{ + switch (tableType) + { + case clearedTable: { /* illegal! */ assert(0); return; } + case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; } + case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; } + case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; } + } +} + +LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + U32 const h = LZ4_hashPosition(p, tableType); + LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase); +} + +/* LZ4_getIndexOnHash() : + * Index of match position registered in hash table. + * hash position must be calculated by using base+index, or dictBase+index. + * Assumption 1 : only valid if tableType == byU32 or byU16. 
+ * Assumption 2 : h is presumed valid (within limits of hash table) + */ +static U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType) +{ + LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2); + if (tableType == byU32) { + const U32* const hashTable = (const U32*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-2))); + return hashTable[h]; + } + if (tableType == byU16) { + const U16* const hashTable = (const U16*) tableBase; + assert(h < (1U << (LZ4_MEMORY_USAGE-1))); + return hashTable[h]; + } + assert(0); return 0; /* forbidden case */ +} + +static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase) +{ + if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; } + if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; } + { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */ +} + +LZ4_FORCE_INLINE const BYTE* +LZ4_getPosition(const BYTE* p, + const void* tableBase, tableType_t tableType, + const BYTE* srcBase) +{ + U32 const h = LZ4_hashPosition(p, tableType); + return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase); +} + +LZ4_FORCE_INLINE void +LZ4_prepareTable(LZ4_stream_t_internal* const cctx, + const int inputSize, + const tableType_t tableType) { + /* If compression failed during the previous step, then the context + * is marked as dirty, therefore, it has to be fully reset. + */ + if (cctx->dirty) { + DEBUGLOG(5, "LZ4_prepareTable: Full reset for %p", cctx); + MEM_INIT(cctx, 0, sizeof(LZ4_stream_t_internal)); + return; + } + + /* If the table hasn't been used, it's guaranteed to be zeroed out, and is + * therefore safe to use no matter what mode we're in. Otherwise, we figure + * out if it's safe to leave as is or whether it needs to be reset. 
+ */ + if (cctx->tableType != clearedTable) { + assert(inputSize >= 0); + if (cctx->tableType != tableType + || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU) + || ((tableType == byU32) && cctx->currentOffset > 1 GB) + || tableType == byPtr + || inputSize >= 4 KB) + { + DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx); + MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE); + cctx->currentOffset = 0; + cctx->tableType = clearedTable; + } else { + DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)"); + } + } + + /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster + * than compressing without a gap. However, compressing with + * currentOffset == 0 is faster still, so we preserve that case. + */ + if (cctx->currentOffset != 0 && tableType == byU32) { + DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset"); + cctx->currentOffset += 64 KB; + } + + /* Finally, clear history */ + cctx->dictCtx = NULL; + cctx->dictionary = NULL; + cctx->dictSize = 0; +} + +/** LZ4_compress_generic() : + inlined, to ensure branches are decided at compilation time */ +LZ4_FORCE_INLINE int LZ4_compress_generic( + LZ4_stream_t_internal* const cctx, + const char* const source, + char* const dest, + const int inputSize, + int *inputConsumed, /* only written when outputDirective == fillOutput */ + const int maxOutputSize, + const limitedOutput_directive outputDirective, + const tableType_t tableType, + const dict_directive dictDirective, + const dictIssue_directive dictIssue, + const int acceleration) +{ + int result; + const BYTE* ip = (const BYTE*) source; + + U32 const startIndex = cctx->currentOffset; + const BYTE* base = (const BYTE*) source - startIndex; + const BYTE* lowLimit; + + const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx; + const BYTE* const dictionary = + dictDirective == usingDictCtx ? 
dictCtx->dictionary : cctx->dictionary; + const U32 dictSize = + dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize; + const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */ + + int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx); + U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */ + const BYTE* const dictEnd = dictionary + dictSize; + const BYTE* anchor = (const BYTE*) source; + const BYTE* const iend = ip + inputSize; + const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1; + const BYTE* const matchlimit = iend - LASTLITERALS; + + /* the dictCtx currentOffset is indexed on the start of the dictionary, + * while a dictionary in the current context precedes the currentOffset */ + const BYTE* dictBase = (dictDirective == usingDictCtx) ? + dictionary + dictSize - dictCtx->currentOffset : + dictionary + dictSize - startIndex; + + BYTE* op = (BYTE*) dest; + BYTE* const olimit = op + maxOutputSize; + + U32 offset = 0; + U32 forwardH; + + DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, tableType=%u", inputSize, tableType); + /* If init conditions are not met, we don't have to mark stream + * as having dirty context, since no action was taken yet */ + if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */ + if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported inputSize, too large (or negative) */ + if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */ + if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */ + assert(acceleration >= 1); + + lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? 
dictSize : 0);
+
+    /* Update context state */
+    if (dictDirective == usingDictCtx) {
+        /* Subsequent linked blocks can't use the dictionary. */
+        /* Instead, they use the block we just compressed. */
+        cctx->dictCtx = NULL;
+        cctx->dictSize = (U32)inputSize;
+    } else {
+        cctx->dictSize += (U32)inputSize;
+    }
+    cctx->currentOffset += (U32)inputSize;
+    cctx->tableType = (U16)tableType;
+
+    if (inputSize<LZ4_minLength) goto _last_literals;        /* Input too small, no compression (all literals) */
+
+    /* First Byte */
+    LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+    ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+    /* Main Loop */
+    for ( ; ; ) {
+        const BYTE* match;
+        BYTE* token;
+        const BYTE* filledIp;
+
+        /* Find a match */
+        if (tableType == byPtr) {
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+                forwardH = LZ4_hashPosition(forwardIp, tableType);
+                LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+            } while ( (match+LZ4_DISTANCE_MAX < ip)
+                   || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+        } else {   /* byU32, byU16 */
+
+            const BYTE* forwardIp = ip;
+            int step = 1;
+            int searchMatchNb = acceleration << LZ4_skipTrigger;
+            do {
+                U32 const h = forwardH;
+                U32 const current = (U32)(forwardIp - base);
+                U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+                assert(matchIndex <= current);
+                assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+                ip = forwardIp;
+                forwardIp += step;
+                step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+                if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+                assert(ip < mflimitPlusOne);
+
+                if (dictDirective == usingDictCtx) {
+                    if (matchIndex < startIndex) {
+                        /* there was no match, try the dictionary */
+                        assert(tableType == byU32);
+                        matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable,
byU32); + match = dictBase + matchIndex; + matchIndex += dictDelta; /* make dictCtx index comparable with current context */ + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex); + assert(startIndex - matchIndex >= MINMATCH); + match = dictBase + matchIndex; + lowLimit = dictionary; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; + } + } else { /* single continuous memory segment */ + match = base + matchIndex; + } + forwardH = LZ4_hashPosition(forwardIp, tableType); + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + + DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex); + if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */ + assert(matchIndex < current); + if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX)) + && (matchIndex+LZ4_DISTANCE_MAX < current)) { + continue; + } /* too far */ + assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */ + + if (LZ4_read32(match) == LZ4_read32(ip)) { + if (maybe_extMem) offset = current - matchIndex; + break; /* match found */ + } + + } while(1); + } + + /* Catch up */ + filledIp = ip; + while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; } + + /* Encode Literals */ + { unsigned const litLength = (unsigned)(ip - anchor); + token = op++; + if ((outputDirective == limitedOutput) && /* Check output buffer overflow */ + (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) { + return 0; /* cannot compress within `dst` budget. 
Stored indexes in hash table are nonetheless fine */
+            }
+            if ((outputDirective == fillOutput) &&
+                (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+                op--;
+                goto _last_literals;
+            }
+            if (litLength >= RUN_MASK) {
+                int len = (int)(litLength - RUN_MASK);
+                *token = (RUN_MASK<<ML_BITS);
+                for(; len >= 255 ; len-=255) *op++ = 255;
+                *op++ = (BYTE)len;
+            }
+            else *token = (BYTE)(litLength<<ML_BITS);
+
+            /* Copy Literals */
+            LZ4_wildCopy8(op, anchor, op+litLength);
+            op+=litLength;
+            DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                        (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
+        }
+
+_next_match:
+        /* at this stage, the following variables must be correctly set :
+         * - ip : at start of LZ match
+         * - match : at start of previous pattern occurence; can be within current prefix, or within extDict
+         * - offset : if maybe_ext_memSegment==1 (constant)
+         * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+         * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+         */
+
+        if ((outputDirective == fillOutput) &&
+            (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+            /* the match was too close to the end, rewind and go to last literals */
+            op = token;
+            goto _last_literals;
+        }
+
+        /* Encode Offset */
+        if (maybe_extMem) {   /* static test */
+            DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+            assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+            LZ4_writeLE16(op, (U16)offset); op+=2;
+        } else {
+            DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+            assert(ip-match <= LZ4_DISTANCE_MAX);
+            LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+        }
+
+        /* Encode MatchLength */
+        {   unsigned matchCode;
+
+            if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+              && (lowLimit==dictionary) /* match within extDict */ ) {
+                const BYTE* limit = ip + (dictEnd-match);
+                assert(dictEnd > match);
+                if (limit > matchlimit) limit = matchlimit;
+                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+                ip += (size_t)matchCode + MINMATCH;
+                if (ip==limit) {
+                    unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
+                    matchCode += more;
+                    ip += more;
+                }
+                DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
+            } else {
+                matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+                ip += (size_t)matchCode + MINMATCH;
+                DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
+            }
+
+            if ((outputDirective) &&    /* Check output buffer overflow */
+                (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+                if
(outputDirective == fillOutput) { + /* Match description too long : reduce it */ + U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255; + ip -= matchCode - newMatchCode; + assert(newMatchCode < matchCode); + matchCode = newMatchCode; + if (unlikely(ip <= filledIp)) { + /* We have already filled up to filledIp so if ip ends up less than filledIp + * we have positions in the hash table beyond the current position. This is + * a problem if we reuse the hash table. So we have to remove these positions + * from the hash table. + */ + const BYTE* ptr; + DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip)); + for (ptr = ip; ptr <= filledIp; ++ptr) { + U32 const h = LZ4_hashPosition(ptr, tableType); + LZ4_clearHash(h, cctx->hashTable, tableType); + } + } + } else { + assert(outputDirective == limitedOutput); + return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */ + } + } + if (matchCode >= ML_MASK) { + *token += ML_MASK; + matchCode -= ML_MASK; + LZ4_write32(op, 0xFFFFFFFF); + while (matchCode >= 4*255) { + op+=4; + LZ4_write32(op, 0xFFFFFFFF); + matchCode -= 4*255; + } + op += matchCode / 255; + *op++ = (BYTE)(matchCode % 255); + } else + *token += (BYTE)(matchCode); + } + /* Ensure we have enough space for the last literals. 
*/ + assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit)); + + anchor = ip; + + /* Test end of chunk */ + if (ip >= mflimitPlusOne) break; + + /* Fill table */ + LZ4_putPosition(ip-2, cctx->hashTable, tableType, base); + + /* Test next position */ + if (tableType == byPtr) { + + match = LZ4_getPosition(ip, cctx->hashTable, tableType, base); + LZ4_putPosition(ip, cctx->hashTable, tableType, base); + if ( (match+LZ4_DISTANCE_MAX >= ip) + && (LZ4_read32(match) == LZ4_read32(ip)) ) + { token=op++; *token=0; goto _next_match; } + + } else { /* byU32, byU16 */ + + U32 const h = LZ4_hashPosition(ip, tableType); + U32 const current = (U32)(ip-base); + U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType); + assert(matchIndex < current); + if (dictDirective == usingDictCtx) { + if (matchIndex < startIndex) { + /* there was no match, try the dictionary */ + matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32); + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + matchIndex += dictDelta; + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else if (dictDirective==usingExtDict) { + if (matchIndex < startIndex) { + match = dictBase + matchIndex; + lowLimit = dictionary; /* required for match length counter */ + } else { + match = base + matchIndex; + lowLimit = (const BYTE*)source; /* required for match length counter */ + } + } else { /* single memory segment */ + match = base + matchIndex; + } + LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType); + assert(matchIndex < current); + if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1) + && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 
1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+                && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+                token=op++;
+                *token=0;
+                if (maybe_extMem) offset = current - matchIndex;
+                DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+                            (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+                goto _next_match;
+            }
+        }
+
+        /* Prepare next loop */
+        forwardH = LZ4_hashPosition(++ip, tableType);
+
+    }
+
+_last_literals:
+    /* Encode Last Literals */
+    {   size_t lastRun = (size_t)(iend - anchor);
+        if ( (outputDirective) &&  /* Check output buffer overflow */
+            (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+            if (outputDirective == fillOutput) {
+                /* adapt lastRun to fill 'dst' */
+                assert(olimit >= op);
+                lastRun  = (size_t)(olimit-op) - 1;
+                lastRun -= (lastRun+240)/255;
+            } else {
+                assert(outputDirective == limitedOutput);
+                return 0;   /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+            }
+        }
+        if (lastRun >= RUN_MASK) {
+            size_t accumulator = lastRun - RUN_MASK;
+            *op++ = RUN_MASK << ML_BITS;
+            for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+            *op++ = (BYTE) accumulator;
+        } else {
+            *op++ = (BYTE)(lastRun<<ML_BITS);
+        }
+        memcpy(op, anchor, lastRun);
+        ip = anchor + lastRun;
+        op += lastRun;
+    }
+
+    if (outputDirective == fillOutput) {
+        *inputConsumed = (int) (((const char*)ip)-source);
+    }
+    DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, (int)(((char*)op) - dest));
+    result = (int)(((char*)op) - dest);
+    assert(result > 0);
+    return result;
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+    LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
+    assert(ctx != NULL);
+    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+    if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+        if (inputSize < LZ4_64Klimit) {
+            return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+        } else {
+            const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ?
byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + if (inputSize < LZ4_64Klimit) { + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration); + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } +} + +/** + * LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. It is only safe + * to call if the state buffer is known to be correctly initialized already + * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of + * "correctly initialized"). + */ +int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) +{ + LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse; + if (acceleration < 1) acceleration = ACCELERATION_DEFAULT; + + if (dstCapacity >= LZ4_compressBound(srcSize)) { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration); + } + } else { + if (srcSize < LZ4_64Klimit) { + const tableType_t tableType = byU16; + LZ4_prepareTable(ctx, srcSize, tableType); + if (ctx->currentOffset) { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration); + } else { + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } else { + const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32; + LZ4_prepareTable(ctx, srcSize, tableType); + return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration); + } + } +} + + +int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration) +{ + int result; +#if (LZ4_HEAPMODE) + LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctxPtr == NULL) return 0; +#else + LZ4_stream_t ctx; + LZ4_stream_t* const ctxPtr = &ctx; +#endif + result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration); + +#if (LZ4_HEAPMODE) + FREEMEM(ctxPtr); +#endif + return result; +} + + +int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize) +{ + return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1); +} + + +/* hidden debug function */ +/* strangely enough, gcc generates faster code when this function is uncommented, even if unused */ +int LZ4_compress_fast_force(const char* src, char* dst, int srcSize, int dstCapacity, int acceleration) +{ + LZ4_stream_t ctx; + LZ4_initStream(&ctx, sizeof(ctx)); + + if (srcSize < LZ4_64Klimit) { + return 
LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, byU16, noDict, noDictIssue, acceleration); + } else { + tableType_t const addrMode = (sizeof(void*) > 4) ? byU32 : byPtr; + return LZ4_compress_generic(&ctx.internal_donotuse, src, dst, srcSize, NULL, dstCapacity, limitedOutput, addrMode, noDict, noDictIssue, acceleration); + } +} + + +/* Note!: This function leaves the stream in an unclean/broken state! + * It is not safe to subsequently use the same state with a _fastReset() or + * _continue() call without resetting it. */ +static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize) +{ + void* const s = LZ4_initStream(state, sizeof (*state)); + assert(s != NULL); (void)s; + + if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */ + return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1); + } else { + if (*srcSizePtr < LZ4_64Klimit) { + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1); + } else { + tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? 
byPtr : byU32; + return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1); + } } +} + + +int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize) +{ +#if (LZ4_HEAPMODE) + LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */ + if (ctx == NULL) return 0; +#else + LZ4_stream_t ctxBody; + LZ4_stream_t* ctx = &ctxBody; +#endif + + int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize); + +#if (LZ4_HEAPMODE) + FREEMEM(ctx); +#endif + return result; +} + + + +/*-****************************** +* Streaming functions +********************************/ + +LZ4_stream_t* LZ4_createStream(void) +{ + LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); + LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */ + DEBUGLOG(4, "LZ4_createStream %p", lz4s); + if (lz4s == NULL) return NULL; + LZ4_initStream(lz4s, sizeof(*lz4s)); + return lz4s; +} + +#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 : + it reports an aligment of 8-bytes, + while actually aligning LZ4_stream_t on 4 bytes. */ +static size_t LZ4_stream_t_alignment(void) +{ + struct { char c; LZ4_stream_t t; } t_a; + return sizeof(t_a) - sizeof(t_a.t); +} +#endif + +LZ4_stream_t* LZ4_initStream (void* buffer, size_t size) +{ + DEBUGLOG(5, "LZ4_initStream"); + if (buffer == NULL) { return NULL; } + if (size < sizeof(LZ4_stream_t)) { return NULL; } +#ifndef _MSC_VER /* for some reason, Visual fails the aligment test on 32-bit x86 : + it reports an aligment of 8-bytes, + while actually aligning LZ4_stream_t on 4 bytes. 
*/ + if (((size_t)buffer) & (LZ4_stream_t_alignment() - 1)) { return NULL; } /* alignment check */ +#endif + MEM_INIT(buffer, 0, sizeof(LZ4_stream_t)); + return (LZ4_stream_t*)buffer; +} + +/* resetStream is now deprecated, + * prefer initStream() which is more general */ +void LZ4_resetStream (LZ4_stream_t* LZ4_stream) +{ + DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream); + MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t)); +} + +void LZ4_resetStream_fast(LZ4_stream_t* ctx) { + LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32); +} + +int LZ4_freeStream (LZ4_stream_t* LZ4_stream) +{ + if (!LZ4_stream) return 0; /* support free on NULL */ + DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream); + FREEMEM(LZ4_stream); + return (0); +} + + +#define HASH_UNIT sizeof(reg_t) +int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize) +{ + LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse; + const tableType_t tableType = byU32; + const BYTE* p = (const BYTE*)dictionary; + const BYTE* const dictEnd = p + dictSize; + const BYTE* base; + + DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict); + + /* It's necessary to reset the context, + * and not just continue it with prepareTable() + * to avoid any risk of generating overflowing matchIndex + * when compressing using this dictionary */ + LZ4_resetStream(LZ4_dict); + + /* We always increment the offset by 64 KB, since, if the dict is longer, + * we truncate it to the last 64k, and if it's shorter, we still want to + * advance by a whole window length so we can provide the guarantee that + * there are only valid offsets in the window, which allows an optimization + * in LZ4_compress_fast_continue() where it uses noDictIssue even when the + * dictionary isn't a full 64k. 
*/ + dict->currentOffset += 64 KB; + + if (dictSize < (int)HASH_UNIT) { + return 0; + } + + if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB; + base = dictEnd - dict->currentOffset; + dict->dictionary = p; + dict->dictSize = (U32)(dictEnd - p); + dict->tableType = tableType; + + while (p <= dictEnd-HASH_UNIT) { + LZ4_putPosition(p, dict->hashTable, tableType, base); + p+=3; + } + + return (int)dict->dictSize; +} + +void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) { + const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL : + &(dictionaryStream->internal_donotuse); + + DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)", + workingStream, dictionaryStream, + dictCtx != NULL ? dictCtx->dictSize : 0); + + /* Calling LZ4_resetStream_fast() here makes sure that changes will not be + * erased by subsequent calls to LZ4_resetStream_fast() in case stream was + * marked as having dirty context, e.g. requiring full reset. + */ + LZ4_resetStream_fast(workingStream); + + if (dictCtx != NULL) { + /* If the current offset is zero, we will never look in the + * external dictionary context, since there is no value a table + * entry can take that indicate a miss. In that case, we need + * to bump the offset to something non-zero. + */ + if (workingStream->internal_donotuse.currentOffset == 0) { + workingStream->internal_donotuse.currentOffset = 64 KB; + } + + /* Don't actually attach an empty dictionary. 
+ */
+        if (dictCtx->dictSize == 0) {
+            dictCtx = NULL;
+        }
+    }
+    workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
+{
+    assert(nextSize >= 0);
+    if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) {   /* potential ptrdiff_t overflow (32-bits mode) */
+        /* rescale hash table */
+        U32 const delta = LZ4_dict->currentOffset - 64 KB;
+        const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+        int i;
+        DEBUGLOG(4, "LZ4_renormDictT");
+        for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
+            if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+            else LZ4_dict->hashTable[i] -= delta;
+        }
+        LZ4_dict->currentOffset = 64 KB;
+        if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+        LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+    }
+}
+
+
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+                                const char* source, char* dest,
+                                int inputSize, int maxOutputSize,
+                                int acceleration)
+{
+    const tableType_t tableType = byU32;
+    LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
+    const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+    DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
+
+    if (streamPtr->dirty) { return 0; } /* Uninitialized structure detected */
+    LZ4_renormDictT(streamPtr, inputSize);   /* avoid index overflow */
+    if (acceleration < 1) acceleration = ACCELERATION_DEFAULT;
+
+    /* invalidate tiny dictionaries */
+    if ( (streamPtr->dictSize-1 < 4-1)   /* intentional underflow */
+      && (dictEnd != (const BYTE*)source) ) {
+        DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+        streamPtr->dictSize = 0;
+        streamPtr->dictionary = (const BYTE*)source;
+        dictEnd = (const BYTE*)source;
+    }
+
+    /* Check overlapping input/dictionary space */
+    {   const BYTE* sourceEnd = (const BYTE*) source + inputSize;
+        if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
streamPtr->dictSize = (U32)(dictEnd - sourceEnd); + if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB; + if (streamPtr->dictSize < 4) streamPtr->dictSize = 0; + streamPtr->dictionary = dictEnd - streamPtr->dictSize; + } + } + + /* prefix mode : source data follows dictionary */ + if (dictEnd == (const BYTE*)source) { + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration); + else + return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration); + } + + /* external dictionary mode */ + { int result; + if (streamPtr->dictCtx) { + /* We depend here on the fact that dictCtx'es (produced by + * LZ4_loadDict) guarantee that their tables contain no references + * to offsets between dictCtx->currentOffset - 64 KB and + * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe + * to use noDictIssue even when the dict isn't a full 64 KB. + */ + if (inputSize > 4 KB) { + /* For compressing large blobs, it is faster to pay the setup + * cost to copy the dictionary's tables into the active context, + * so that the compression loop is only looking into one table. 
+ */ + memcpy(streamPtr, streamPtr->dictCtx, sizeof(LZ4_stream_t)); + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration); + } + } else { + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration); + } + } + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)inputSize; + return result; + } +} + + +/* Hidden debug function, to force-test external dictionary mode */ +int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize) +{ + LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse; + int result; + + LZ4_renormDictT(streamPtr, srcSize); + + if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1); + } else { + result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1); + } + + streamPtr->dictionary = (const BYTE*)source; + streamPtr->dictSize = (U32)srcSize; + + return result; +} + + +/*! LZ4_saveDict() : + * If previously compressed data block is not guaranteed to remain available at its memory location, + * save it into a safer place (char* safeBuffer). 
+ * Note : you don't need to call LZ4_loadDict() afterwards, + * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue(). + * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error. + */ +int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize) +{ + LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse; + const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize; + + if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */ + if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; } + + memmove(safeBuffer, previousDictEnd - dictSize, dictSize); + + dict->dictionary = (const BYTE*)safeBuffer; + dict->dictSize = (U32)dictSize; + + return dictSize; +} + + + +/*-******************************* + * Decompression functions + ********************************/ + +typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; +typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive; + +#undef MIN +#define MIN(a,b) ( (a) < (b) ? (a) : (b) ) + +/* Read the variable-length literal or match length. + * + * ip - pointer to use as input. + * lencheck - end ip. Return an error if ip advances >= lencheck. + * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so. + * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so. + * error (output) - error code. Should be set to 0 before call. 
+ */ +typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error; +LZ4_FORCE_INLINE unsigned +read_variable_length(const BYTE**ip, const BYTE* lencheck, int loop_check, int initial_check, variable_length_error* error) +{ + unsigned length = 0; + unsigned s; + if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */ + *error = initial_error; + return length; + } + do { + s = **ip; + (*ip)++; + length += s; + if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */ + *error = loop_error; + return length; + } + } while (s==255); + + return length; +} + +/*! LZ4_decompress_generic() : + * This generic decompression function covers all use cases. + * It shall be instantiated several times, using different sets of directives. + * Note that it is important for performance that this function really get inlined, + * in order to remove useless branches during compilation optimization. + */ +LZ4_FORCE_INLINE int +LZ4_decompress_generic( + const char* const src, + char* const dst, + int srcSize, + int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */ + + endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */ + earlyEnd_directive partialDecoding, /* full, partial */ + dict_directive dict, /* noDict, withPrefix64k, usingExtDict */ + const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */ + const BYTE* const dictStart, /* only if dict==usingExtDict */ + const size_t dictSize /* note : = 0 if noDict */ + ) +{ + if (src == NULL) { return -1; } + + { const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + + BYTE* op = (BYTE*) dst; + BYTE* const oend = op + outputSize; + BYTE* cpy; + + const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize; + + const int safeDecode = (endOnInput==endOnInputSize); + const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB))); + + + /* Set up the "end" pointers for the shortcut. 
*/ + const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/; + const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/; + + const BYTE* match; + size_t offset; + unsigned token; + size_t length; + + + DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize); + + /* Special cases */ + assert(lowPrefix <= op); + if ((endOnInput) && (unlikely(outputSize==0))) { + /* Empty output buffer */ + if (partialDecoding) return 0; + return ((srcSize==1) && (*ip==0)) ? 0 : -1; + } + if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); } + if ((endOnInput) && unlikely(srcSize==0)) { return -1; } + + /* Currently the fast loop shows a regression on qualcomm arm chips. */ +#if LZ4_FAST_DEC_LOOP + if ((oend - op) < FASTLOOP_SAFE_DISTANCE) { + DEBUGLOG(6, "skip fast decode loop"); + goto safe_decode; + } + + /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */ + while (1) { + /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */ + assert(oend - op >= FASTLOOP_SAFE_DISTANCE); + if (endOnInput) { assert(ip < iend); } + token = *ip++; + length = token >> ML_BITS; /* literal length */ + + assert(!endOnInput || ip <= iend); /* ip < iend before the increment */ + + /* decode literal length */ + if (length == RUN_MASK) { + variable_length_error error = ok; + length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error); + if (error == initial_error) { goto _output_error; } + if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + + /* copy literals */ + cpy = op+length; + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if (endOnInput) { /* LZ4_decompress_safe() */ + if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; } + 
LZ4_wildCopy32(op, ip, cpy); + } else { /* LZ4_decompress_fast() */ + if (cpy>oend-8) { goto safe_literal_copy; } + LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time : + * it doesn't know input length, and only relies on end-of-block properties */ + } + ip += length; op = cpy; + } else { + cpy = op+length; + if (endOnInput) { /* LZ4_decompress_safe() */ + DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length); + /* We don't need to check oend, since we check it once for each loop below */ + if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; } + /* Literals can only be 14, but hope compilers optimize if we copy by a register size */ + memcpy(op, ip, 16); + } else { /* LZ4_decompress_fast() */ + /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time : + * it doesn't know input length, and relies on end-of-block properties */ + memcpy(op, ip, 8); + if (length > 8) { memcpy(op+8, ip+8, 8); } + } + ip += length; op = cpy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + match = op - offset; + assert(match <= op); + + /* get matchlength */ + length = token & ML_MASK; + + if (length == ML_MASK) { + variable_length_error error = ok; + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ + length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error); + if (error != ok) { goto _output_error; } + if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */ + length += MINMATCH; + if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + goto safe_match_copy; + } + } else { + length += MINMATCH; + if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) { + goto safe_match_copy; + } + + /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */ + if ((dict == withPrefix64k) || (match >= lowPrefix)) { + if (offset >= 8) { + 
assert(match >= lowPrefix); + assert(match <= op); + assert(op + 18 <= oend); + + memcpy(op, match, 8); + memcpy(op+8, match+8, 8); + memcpy(op+16, match+16, 2); + op += length; + continue; + } } } + + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */ + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) { + length = MIN(length, (size_t)(oend-op)); /* reach end of buffer */ + } else { + goto _output_error; /* end-of-block condition violated */ + } } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) { *op++ = *copyFrom++; } + } else { + memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + + /* copy match within block */ + cpy = op + length; + + assert((op <= oend) && (oend-op >= 32)); + if (unlikely(offset<16)) { + LZ4_memcpy_using_offset(op, match, cpy, offset); + } else { + LZ4_wildCopy32(op, match, cpy); + } + + op = cpy; /* wildcopy correction */ + } + safe_decode: +#endif + + /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */ + while (1) { + token = *ip++; + length = token >> ML_BITS; /* literal length */ + + assert(!endOnInput || ip <= iend); /* ip < iend before the increment */ + + /* A two-stage shortcut for the most common case: + * 1) If the literal length is 0..14, and there 
is enough space, + * enter the shortcut and copy 16 bytes on behalf of the literals + * (in the fast mode, only 8 bytes can be safely copied this way). + * 2) Further if the match length is 4..18, copy 18 bytes in a similar + * manner; but we ensure that there's enough space in the output for + * those 18 bytes earlier, upon entering the shortcut (in other words, + * there is a combined check for both stages). + */ + if ( (endOnInput ? length != RUN_MASK : length <= 8) + /* strictly "less than" on input, to re-enter the loop with at least one byte */ + && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) { + /* Copy the literals */ + memcpy(op, ip, endOnInput ? 16 : 8); + op += length; ip += length; + + /* The second stage: prepare for match copying, decode full info. + * If it doesn't work out, the info won't be wasted. */ + length = token & ML_MASK; /* match length */ + offset = LZ4_readLE16(ip); ip += 2; + match = op - offset; + assert(match <= op); /* check overflow */ + + /* Do not deal with overlapping matches. */ + if ( (length != ML_MASK) + && (offset >= 8) + && (dict==withPrefix64k || match >= lowPrefix) ) { + /* Copy the match. */ + memcpy(op + 0, match + 0, 8); + memcpy(op + 8, match + 8, 8); + memcpy(op +16, match +16, 2); + op += length + MINMATCH; + /* Both stages worked, load the next token. */ + continue; + } + + /* The second stage didn't work out, but the info is ready. + * Propel it right to the point of match copying. 
*/ + goto _copy_match; + } + + /* decode literal length */ + if (length == RUN_MASK) { + variable_length_error error = ok; + length += read_variable_length(&ip, iend-RUN_MASK, endOnInput, endOnInput, &error); + if (error == initial_error) { goto _output_error; } + if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */ + if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */ + } + + /* copy literals */ + cpy = op+length; +#if LZ4_FAST_DEC_LOOP + safe_literal_copy: +#endif + LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH); + if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) ) + || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) ) + { + /* We've either hit the input parsing restriction or the output parsing restriction. + * If we've hit the input parsing condition then this must be the last sequence. + * If we've hit the output parsing condition then we are either using partialDecoding + * or we've hit the output parsing condition. + */ + if (partialDecoding) { + /* Since we are partial decoding we may be in this block because of the output parsing + * restriction, which is not valid since the output buffer is allowed to be undersized. + */ + assert(endOnInput); + /* If we're in this block because of the input parsing condition, then we must be on the + * last sequence (or invalid), so we must check that we exactly consume the input. + */ + if ((ip+length>iend-(2+1+LASTLITERALS)) && (ip+length != iend)) { goto _output_error; } + assert(ip+length <= iend); + /* We are finishing in the middle of a literals segment. + * Break after the copy. 
+ */ + if (cpy > oend) { + cpy = oend; + assert(op<=oend); + length = (size_t)(oend-op); + } + assert(ip+length <= iend); + } else { + /* We must be on the last sequence because of the parsing limitations so check + * that we exactly regenerate the original size (must be exact when !endOnInput). + */ + if ((!endOnInput) && (cpy != oend)) { goto _output_error; } + /* We must be on the last sequence (or invalid) because of the parsing limitations + * so check that we exactly consume the input and don't overrun the output buffer. + */ + if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) { goto _output_error; } + } + memmove(op, ip, length); /* supports overlapping memory regions, which only matters for in-place decompression scenarios */ + ip += length; + op += length; + /* Necessarily EOF when !partialDecoding. When partialDecoding + * it is EOF if we've either filled the output buffer or hit + * the input parsing restriction. + */ + if (!partialDecoding || (cpy == oend) || (ip == iend)) { + break; + } + } else { + LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */ + ip += length; op = cpy; + } + + /* get offset */ + offset = LZ4_readLE16(ip); ip+=2; + match = op - offset; + + /* get matchlength */ + length = token & ML_MASK; + + _copy_match: + if (length == ML_MASK) { + variable_length_error error = ok; + length += read_variable_length(&ip, iend - LASTLITERALS + 1, endOnInput, 0, &error); + if (error != ok) goto _output_error; + if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */ + } + length += MINMATCH; + +#if LZ4_FAST_DEC_LOOP + safe_match_copy: +#endif + if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */ + /* match starting within external dictionary */ + if ((dict==usingExtDict) && (match < lowPrefix)) { + if (unlikely(op+length > oend-LASTLITERALS)) { + if (partialDecoding) length = MIN(length, 
(size_t)(oend-op)); + else goto _output_error; /* doesn't respect parsing restriction */ + } + + if (length <= (size_t)(lowPrefix-match)) { + /* match fits entirely within external dictionary : just copy */ + memmove(op, dictEnd - (lowPrefix-match), length); + op += length; + } else { + /* match stretches into both external dictionary and current block */ + size_t const copySize = (size_t)(lowPrefix - match); + size_t const restSize = length - copySize; + memcpy(op, dictEnd - copySize, copySize); + op += copySize; + if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */ + BYTE* const endOfMatch = op + restSize; + const BYTE* copyFrom = lowPrefix; + while (op < endOfMatch) *op++ = *copyFrom++; + } else { + memcpy(op, lowPrefix, restSize); + op += restSize; + } } + continue; + } + assert(match >= lowPrefix); + + /* copy match within block */ + cpy = op + length; + + /* partialDecoding : may end anywhere within the block */ + assert(op<=oend); + if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + size_t const mlen = MIN(length, (size_t)(oend-op)); + const BYTE* const matchEnd = match + mlen; + BYTE* const copyEnd = op + mlen; + if (matchEnd > op) { /* overlap copy */ + while (op < copyEnd) { *op++ = *match++; } + } else { + memcpy(op, match, mlen); + } + op = copyEnd; + if (op == oend) { break; } + continue; + } + + if (unlikely(offset<8)) { + LZ4_write32(op, 0); /* silence msan warning when offset==0 */ + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += inc32table[offset]; + memcpy(op+4, match, 4); + match -= dec64table[offset]; + } else { + memcpy(op, match, 8); + match += 8; + } + op += 8; + + if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) { + BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1); + if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */ + if (op < oCopyLimit) { + LZ4_wildCopy8(op, match, oCopyLimit); + match += 
oCopyLimit - op; + op = oCopyLimit; + } + while (op < cpy) { *op++ = *match++; } + } else { + memcpy(op, match, 8); + if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); } + } + op = cpy; /* wildcopy correction */ + } + + /* end of decoding */ + if (endOnInput) { + return (int) (((char*)op)-dst); /* Nb of output bytes decoded */ + } else { + return (int) (((const char*)ip)-src); /* Nb of input bytes read */ + } + + /* Overflow error detected */ + _output_error: + return (int) (-(((const char*)ip)-src))-1; + } +} + + +/*===== Instantiate the API decoding functions. =====*/ + +LZ4_FORCE_O2_GCC_PPC64LE +int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize, + endOnInputSize, decode_full_block, noDict, + (BYTE*)dest, NULL, 0); +} + +LZ4_FORCE_O2_GCC_PPC64LE +int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity) +{ + dstCapacity = MIN(targetOutputSize, dstCapacity); + return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity, + endOnInputSize, partial_decode, + noDict, (BYTE*)dst, NULL, 0); +} + +LZ4_FORCE_O2_GCC_PPC64LE +int LZ4_decompress_fast(const char* source, char* dest, int originalSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, decode_full_block, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +/*===== Instantiate a few more decoding cases, used more than once. =====*/ + +LZ4_FORCE_O2_GCC_PPC64LE /* Exported, an obsolete API function. */ +int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, withPrefix64k, + (BYTE*)dest - 64 KB, NULL, 0); +} + +/* Another obsolete API function, paired with the previous one. 
*/ +int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize) +{ + /* LZ4_decompress_fast doesn't validate match offsets, + * and thus serves well with any prefixed dictionary. */ + return LZ4_decompress_fast(source, dest, originalSize); +} + +LZ4_FORCE_O2_GCC_PPC64LE +static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, noDict, + (BYTE*)dest-prefixSize, NULL, 0); +} + +LZ4_FORCE_O2_GCC_PPC64LE +int LZ4_decompress_safe_forceExtDict(const char* source, char* dest, + int compressedSize, int maxOutputSize, + const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_O2_GCC_PPC64LE +static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize, + const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, decode_full_block, usingExtDict, + (BYTE*)dest, (const BYTE*)dictStart, dictSize); +} + +/* The "double dictionary" mode, for use with e.g. ring buffers: the first part + * of the dictionary is passed as prefix, and the second via dictStart + dictSize. + * These routines are used only once, in LZ4_decompress_*_continue(). 
+ */ +LZ4_FORCE_INLINE +int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize, + endOnInputSize, decode_full_block, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} + +LZ4_FORCE_INLINE +int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize, + size_t prefixSize, const void* dictStart, size_t dictSize) +{ + return LZ4_decompress_generic(source, dest, 0, originalSize, + endOnOutputSize, decode_full_block, usingExtDict, + (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize); +} + +/*===== streaming decompression functions =====*/ + +LZ4_streamDecode_t* LZ4_createStreamDecode(void) +{ + LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t)); + LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */ + return lz4s; +} + +int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream) +{ + if (LZ4_stream == NULL) { return 0; } /* support free on NULL */ + FREEMEM(LZ4_stream); + return 0; +} + +/*! LZ4_setStreamDecode() : + * Use this function to instruct where to find the dictionary. + * This function is not necessary if previous data is still available where it was decoded. + * Loading a size of 0 is allowed (same effect as no dictionary). + * @return : 1 if OK, 0 if error + */ +int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + lz4sd->prefixSize = (size_t) dictSize; + lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize; + lz4sd->externalDict = NULL; + lz4sd->extDictSize = 0; + return 1; +} + +/*! 
LZ4_decoderRingBufferSize() : + * when setting a ring buffer for streaming decompression (optional scenario), + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * Note : in a ring buffer scenario, + * blocks are presumed decompressed next to each other. + * When not enough space remains for next block (remainingSize < maxBlockSize), + * decoding resumes from beginning of ring buffer. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +int LZ4_decoderRingBufferSize(int maxBlockSize) +{ + if (maxBlockSize < 0) return 0; + if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0; + if (maxBlockSize < 16) maxBlockSize = 16; + return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize); +} + +/* +*_continue() : + These decoding functions allow decompression of multiple blocks in "streaming" mode. + Previously decoded blocks must still be available at the memory position where they were decoded. + If it's not possible, save the relevant part of decoded data into a safe buffer, + and indicate where it stands using LZ4_setStreamDecode() +*/ +LZ4_FORCE_O2_GCC_PPC64LE +int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + + if (lz4sd->prefixSize == 0) { + /* The first call, no dictionary yet. */ + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + /* They're rolling the current segment. 
*/ + if (lz4sd->prefixSize >= 64 KB - 1) + result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + else if (lz4sd->extDictSize == 0) + result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize); + else + result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize, + lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += (size_t)result; + lz4sd->prefixEnd += result; + } else { + /* The buffer wraps around, or they're switching to another buffer. */ + lz4sd->extDictSize = lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)result; + lz4sd->prefixEnd = (BYTE*)dest + result; + } + + return result; +} + +LZ4_FORCE_O2_GCC_PPC64LE +int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize) +{ + LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse; + int result; + assert(originalSize >= 0); + + if (lz4sd->prefixSize == 0) { + assert(lz4sd->extDictSize == 0); + result = LZ4_decompress_fast(source, dest, originalSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } else if (lz4sd->prefixEnd == (BYTE*)dest) { + if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0) + result = LZ4_decompress_fast(source, dest, originalSize); + else + result = LZ4_decompress_fast_doubleDict(source, dest, originalSize, + lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize += (size_t)originalSize; + lz4sd->prefixEnd += originalSize; + } else { + lz4sd->extDictSize = 
lz4sd->prefixSize; + lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; + result = LZ4_decompress_fast_extDict(source, dest, originalSize, + lz4sd->externalDict, lz4sd->extDictSize); + if (result <= 0) return result; + lz4sd->prefixSize = (size_t)originalSize; + lz4sd->prefixEnd = (BYTE*)dest + originalSize; + } + + return result; +} + + +/* +Advanced decoding functions : +*_usingDict() : + These decoding functions work the same as "_continue" ones, + the dictionary must be explicitly provided within parameters +*/ + +int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize) +{ + if (dictSize==0) + return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize); + if (dictStart+dictSize == dest) { + if (dictSize >= 64 KB - 1) { + return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize); + } + assert(dictSize >= 0); + return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize); +} + +int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize) +{ + if (dictSize==0 || dictStart+dictSize == dest) + return LZ4_decompress_fast(source, dest, originalSize); + assert(dictSize >= 0); + return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize); +} + + +/*=************************************************* +* Obsolete Functions +***************************************************/ +/* obsolete compression functions */ +int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize) +{ + return LZ4_compress_default(source, dest, inputSize, maxOutputSize); +} +int LZ4_compress(const char* src, char* dest, int srcSize) +{ + return 
LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize)); +} +int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1); +} +int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize) +{ + return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1); +} +int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity) +{ + return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1); +} +int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize) +{ + return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1); +} + +/* +These decompression functions are deprecated and should no longer be used. +They are only provided here for compatibility with older user programs. 
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast +- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe +*/ +int LZ4_uncompress (const char* source, char* dest, int outputSize) +{ + return LZ4_decompress_fast(source, dest, outputSize); +} +int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) +{ + return LZ4_decompress_safe(source, dest, isize, maxOutputSize); +} + +/* Obsolete Streaming functions */ + +int LZ4_sizeofStreamState() { return LZ4_STREAMSIZE; } + +int LZ4_resetStreamState(void* state, char* inputBuffer) +{ + (void)inputBuffer; + LZ4_resetStream((LZ4_stream_t*)state); + return 0; +} + +void* LZ4_create (char* inputBuffer) +{ + (void)inputBuffer; + return LZ4_createStream(); +} + +char* LZ4_slideInputBuffer (void* state) +{ + /* avoid const char * -> char * conversion warning */ + return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary; +} + +#endif /* LZ4_COMMONDEFS_ONLY */ diff --git a/src/borg/algorithms/lz4/lib/lz4.h b/src/borg/algorithms/lz4/lib/lz4.h new file mode 100644 index 0000000000..32108e2329 --- /dev/null +++ b/src/borg/algorithms/lz4/lib/lz4.h @@ -0,0 +1,764 @@ +/* + * LZ4 - Fast LZ compression algorithm + * Header File + * Copyright (C) 2011-present, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - LZ4 homepage : http://www.lz4.org + - LZ4 source repository : https://github.com/lz4/lz4 +*/ +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef LZ4_H_2983827168210 +#define LZ4_H_2983827168210 + +/* --- Dependency --- */ +#include /* size_t */ + + +/** + Introduction + + LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core, + scalable with multi-cores CPU. It features an extremely fast decoder, with speed in + multiple GB/s per core, typically reaching RAM speed limits on multi-core systems. + + The LZ4 compression library provides in-memory compression and decompression functions. + It gives full buffer control to user. + Compression can be done in: + - a single step (described as Simple Functions) + - a single step, reusing a context (described in Advanced Functions) + - unbounded multiple steps (described as Streaming compression) + + lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md). + Decompressing such a compressed block requires additional metadata. + Exact metadata depends on exact decompression function. 
+ For the typical case of LZ4_decompress_safe(), + metadata includes block's compressed size, and maximum bound of decompressed size. + Each application is free to encode and pass such metadata in whichever way it wants. + + lz4.h only handle blocks, it can not generate Frames. + + Blocks are different from Frames (doc/lz4_Frame_format.md). + Frames bundle both blocks and metadata in a specified manner. + Embedding metadata is required for compressed data to be self-contained and portable. + Frame format is delivered through a companion API, declared in lz4frame.h. + The `lz4` CLI can only manage frames. +*/ + +/*^*************************************************************** +* Export parameters +*****************************************************************/ +/* +* LZ4_DLL_EXPORT : +* Enable exporting of functions when building a Windows DLL +* LZ4LIB_VISIBILITY : +* Control library symbols visibility. +*/ +#ifndef LZ4LIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define LZ4LIB_VISIBILITY +# endif +#endif +#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1) +# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY +#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1) +# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define LZ4LIB_API LZ4LIB_VISIBILITY +#endif + +/*------ Version ------*/ +#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */ +#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */ +#define LZ4_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ + +#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE) + +#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE +#define LZ4_QUOTE(str) #str 
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str) +#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION) + +LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */ +LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */ + + +/*-************************************ +* Tuning parameter +**************************************/ +/*! + * LZ4_MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio. + * Reduced memory usage may improve speed, thanks to better cache locality. + * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache + */ +#ifndef LZ4_MEMORY_USAGE +# define LZ4_MEMORY_USAGE 14 +#endif + + +/*-************************************ +* Simple Functions +**************************************/ +/*! LZ4_compress_default() : + * Compresses 'srcSize' bytes from buffer 'src' + * into already allocated 'dst' buffer of size 'dstCapacity'. + * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize). + * It also runs faster, so it's a recommended setting. + * If the function cannot compress 'src' into a more limited 'dst' budget, + * compression stops *immediately*, and the function result is zero. + * In which case, 'dst' content is undefined (invalid). + * srcSize : max supported value is LZ4_MAX_INPUT_SIZE. + * dstCapacity : size of buffer 'dst' (which must be already allocated) + * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity) + * or 0 if compression fails + * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer). + */ +LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity); + +/*! 
LZ4_decompress_safe() : + * compressedSize : is the exact complete size of the compressed block. + * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size. + * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity) + * If destination buffer is not large enough, decoding will stop and output an error code (negative value). + * If the source stream is detected malformed, the function will stop decoding and return a negative result. + * Note 1 : This function is protected against malicious data packets : + * it will never writes outside 'dst' buffer, nor read outside 'source' buffer, + * even if the compressed block is maliciously modified to order the decoder to do these actions. + * In such case, the decoder stops immediately, and considers the compressed block malformed. + * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them. + * The implementation is free to send / store / derive this information in whichever way is most beneficial. + * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead. + */ +LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity); + + +/*-************************************ +* Advanced Functions +**************************************/ +#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */ +#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16) + +/*! LZ4_compressBound() : + Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible) + This function is primarily useful for memory allocation purposes (destination buffer size). 
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example). + Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize) + inputSize : max supported value is LZ4_MAX_INPUT_SIZE + return : maximum output size in a "worst case" scenario + or 0, if input size is incorrect (too large or negative) +*/ +LZ4LIB_API int LZ4_compressBound(int inputSize); + +/*! LZ4_compress_fast() : + Same as LZ4_compress_default(), but allows selection of "acceleration" factor. + The larger the acceleration value, the faster the algorithm, but also the lesser the compression. + It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed. + An acceleration value of "1" is the same as regular LZ4_compress_default() + Values <= 0 will be replaced by ACCELERATION_DEFAULT (currently == 1, see lz4.c). +*/ +LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + + +/*! LZ4_compress_fast_extState() : + * Same as LZ4_compress_fast(), using an externally allocated memory space for its state. + * Use LZ4_sizeofState() to know how much memory must be allocated, + * and allocate it on 8-bytes boundaries (using `malloc()` typically). + * Then, provide this buffer as `void* state` to compression function. + */ +LZ4LIB_API int LZ4_sizeofState(void); +LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + + +/*! LZ4_compress_destSize() : + * Reverse the logic : compresses as much data as possible from 'src' buffer + * into already allocated buffer 'dst', of size >= 'targetDestSize'. + * This function either compresses the entire 'src' content into 'dst' if it's large enough, + * or fill 'dst' buffer completely with as much data as possible from 'src'. + * note: acceleration parameter is fixed to "default". 
+ * + * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'. + * New value is necessarily <= input value. + * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize) + * or 0 if compression fails. +*/ +LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize); + + +/*! LZ4_decompress_safe_partial() : + * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src', + * into destination buffer 'dst' of size 'dstCapacity'. + * Up to 'targetOutputSize' bytes will be decoded. + * The function stops decoding on reaching this objective, + * which can boost performance when only the beginning of a block is required. + * + * @return : the number of bytes decoded in `dst` (necessarily <= dstCapacity) + * If source stream is detected malformed, function returns a negative result. + * + * Note : @return can be < targetOutputSize, if compressed block contains less data. + * + * Note 2 : this function features 2 parameters, targetOutputSize and dstCapacity, + * and expects targetOutputSize <= dstCapacity. + * It effectively stops decoding on reaching targetOutputSize, + * so dstCapacity is kind of redundant. + * This is because in a previous version of this function, + * decoding operation would not "break" a sequence in the middle. + * As a consequence, there was no guarantee that decoding would stop at exactly targetOutputSize, + * it could write more bytes, though only up to dstCapacity. + * Some "margin" used to be required for this operation to work properly. + * This is no longer necessary. + * The function nonetheless keeps its signature, in an effort to not break API. 
+ */ +LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity); + + +/*-********************************************* +* Streaming Compression Functions +***********************************************/ +typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */ + +LZ4LIB_API LZ4_stream_t* LZ4_createStream(void); +LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr); + +/*! LZ4_resetStream_fast() : v1.9.0+ + * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks + * (e.g., LZ4_compress_fast_continue()). + * + * An LZ4_stream_t must be initialized once before usage. + * This is automatically done when created by LZ4_createStream(). + * However, should the LZ4_stream_t be simply declared on stack (for example), + * it's necessary to initialize it first, using LZ4_initStream(). + * + * After init, start any new stream with LZ4_resetStream_fast(). + * A same LZ4_stream_t can be re-used multiple times consecutively + * and compress multiple streams, + * provided that it starts each new stream with LZ4_resetStream_fast(). + * + * LZ4_resetStream_fast() is much faster than LZ4_initStream(), + * but is not compatible with memory regions containing garbage data. + * + * Note: it's only useful to call LZ4_resetStream_fast() + * in the context of streaming compression. + * The *extState* functions perform their own resets. + * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive. + */ +LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr); + +/*! LZ4_loadDict() : + * Use this function to reference a static dictionary into LZ4_stream_t. + * The dictionary must remain available during compression. + * LZ4_loadDict() triggers a reset, so any previous data will be forgotten. + * The same dictionary will have to be loaded on decompression side for successful decoding. 
+ * Dictionary are useful for better compression of small data (KB range). + * While LZ4 accept any input as dictionary, + * results are generally better when using Zstandard's Dictionary Builder. + * Loading a size of 0 is allowed, and is the same as reset. + * @return : loaded dictionary size, in bytes (necessarily <= 64 KB) + */ +LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize); + +/*! LZ4_compress_fast_continue() : + * Compress 'src' content using data from previously compressed blocks, for better compression ratio. + * 'dst' buffer must be already allocated. + * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster. + * + * @return : size of compressed block + * or 0 if there is an error (typically, cannot fit into 'dst'). + * + * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block. + * Each block has precise boundaries. + * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata. + * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together. + * + * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory ! + * + * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB. + * Make sure that buffers are separated, by at least one byte. + * This construction ensures that each block only depends on previous block. + * + * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB. + * + * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed. + */ +LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! 
LZ4_saveDict() : + * If last 64KB data cannot be guaranteed to remain available at its current memory location, + * save it into a safer place (char* safeBuffer). + * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(), + * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables. + * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error. + */ +LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize); + + +/*-********************************************** +* Streaming Decompression Functions +* Bufferless synchronous API +************************************************/ +typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */ + +/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() : + * creation / destruction of streaming decompression tracking context. + * A tracking context can be re-used multiple times. + */ +LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void); +LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream); + +/*! LZ4_setStreamDecode() : + * An LZ4_streamDecode_t context can be allocated once and re-used multiple times. + * Use this function to start decompression of a new stream of blocks. + * A dictionary can optionally be set. Use NULL or size 0 for a reset order. + * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression. + * @return : 1 if OK, 0 if error + */ +LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize); + +/*! LZ4_decoderRingBufferSize() : v1.8.2+ + * Note : in a ring buffer scenario (optional), + * blocks are presumed decompressed next to each other + * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize), + * at which stage it resumes from beginning of ring buffer. 
+ * When setting such a ring buffer for streaming decompression, + * provides the minimum size of this ring buffer + * to be compatible with any source respecting maxBlockSize condition. + * @return : minimum ring buffer size, + * or 0 if there is an error (invalid maxBlockSize). + */ +LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize); +#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */ + +/*! LZ4_decompress_*_continue() : + * These decoding functions allow decompression of consecutive blocks in "streaming" mode. + * A block is an unsplittable entity, it must be presented entirely to a decompression function. + * Decompression functions only accepts one block at a time. + * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded. + * If less than 64KB of data has been decoded, all the data must be present. + * + * Special : if decompression side sets a ring buffer, it must respect one of the following conditions : + * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize). + * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes. + * In which case, encoding and decoding buffers do not need to be synchronized. + * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize. + * - Synchronized mode : + * Decompression buffer size is _exactly_ the same as compression buffer size, + * and follows exactly same update rule (block boundaries at same positions), + * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream), + * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB). + * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes. 
+ * In which case, encoding and decoding buffers do not need to be synchronized, + * and encoding ring buffer can have any size, including small ones ( < 64 KB). + * + * Whenever these conditions are not possible, + * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression, + * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block. +*/ +LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity); + + +/*! LZ4_decompress_*_usingDict() : + * These decoding functions work the same as + * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue() + * They are stand-alone, and don't need an LZ4_streamDecode_t structure. + * Dictionary is presumed stable : it must remain accessible and unmodified during decompression. + * Performance tip : Decompression speed can be substantially increased + * when dst == dictStart + dictSize. + */ +LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize); + +#endif /* LZ4_H_2983827168210 */ + + +/*^************************************* + * !!!!!! STATIC LINKING ONLY !!!!!! + ***************************************/ + +/*-**************************************************************************** + * Experimental section + * + * Symbols declared in this section must be considered unstable. Their + * signatures or semantics may change, or they may be removed altogether in the + * future. They are therefore only safe to depend on when the caller is + * statically linked against the library. + * + * To protect against unsafe usage, not only are the declarations guarded, + * the definitions are hidden by default + * when building LZ4 as a shared/dynamic library. 
+ * + * In order to access these declarations, + * define LZ4_STATIC_LINKING_ONLY in your application + * before including LZ4's headers. + * + * In order to make their implementations accessible dynamically, you must + * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library. + ******************************************************************************/ + +#ifdef LZ4_STATIC_LINKING_ONLY + +#ifndef LZ4_STATIC_3504398509 +#define LZ4_STATIC_3504398509 + +#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS +#define LZ4LIB_STATIC_API LZ4LIB_API +#else +#define LZ4LIB_STATIC_API +#endif + + +/*! LZ4_compress_fast_extState_fastReset() : + * A variant of LZ4_compress_fast_extState(). + * + * Using this variant avoids an expensive initialization step. + * It is only safe to call if the state buffer is known to be correctly initialized already + * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized"). + * From a high level, the difference is that + * this function initializes the provided state with a call to something like LZ4_resetStream_fast() + * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream(). + */ +LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration); + +/*! LZ4_attach_dictionary() : + * This is an experimental API that allows + * efficient use of a static dictionary many times. + * + * Rather than re-loading the dictionary buffer into a working context before + * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a + * working LZ4_stream_t, this function introduces a no-copy setup mechanism, + * in which the working stream references the dictionary stream in-place. + * + * Several assumptions are made about the state of the dictionary stream. + * Currently, only streams which have been prepared by LZ4_loadDict() should + * be expected to work. 
+ * + * Alternatively, the provided dictionaryStream may be NULL, + * in which case any existing dictionary stream is unset. + * + * If a dictionary is provided, it replaces any pre-existing stream history. + * The dictionary contents are the only history that can be referenced and + * logically immediately precede the data compressed in the first subsequent + * compression call. + * + * The dictionary will only remain attached to the working stream through the + * first compression call, at the end of which it is cleared. The dictionary + * stream (and source buffer) must remain in-place / accessible / unchanged + * through the completion of the first compression call on the stream. + */ +LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream); + + +/*! In-place compression and decompression + * + * It's possible to have input and output sharing the same buffer, + * for highly contrained memory environments. + * In both cases, it requires input to lay at the end of the buffer, + * and decompression to start at beginning of the buffer. + * Buffer size must feature some margin, hence be larger than final size. + * + * |<------------------------buffer--------------------------------->| + * |<-----------compressed data--------->| + * |<-----------decompressed size------------------>| + * |<----margin---->| + * + * This technique is more useful for decompression, + * since decompressed size is typically larger, + * and margin is short. + * + * In-place decompression will work inside any buffer + * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize). + * This presumes that decompressedSize > compressedSize. + * Otherwise, it means compression actually expanded data, + * and it would be more efficient to store such data with a flag indicating it's not compressed. + * This can happen when data is not compressible (already compressed, or encrypted). 
+ * + * For in-place compression, margin is larger, as it must be able to cope with both + * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX, + * and data expansion, which can happen when input is not compressible. + * As a consequence, buffer size requirements are much higher, + * and memory savings offered by in-place compression are more limited. + * + * There are ways to limit this cost for compression : + * - Reduce history size, by modifying LZ4_DISTANCE_MAX. + * Note that it is a compile-time constant, so all compressions will apply this limit. + * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX, + * so it's a reasonable trick when inputs are known to be small. + * - Require the compressor to deliver a "maximum compressed size". + * This is the `dstCapacity` parameter in `LZ4_compress*()`. + * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail, + * in which case, the return code will be 0 (zero). + * The caller must be ready for these cases to happen, + * and typically design a backup scheme to send data uncompressed. + * The combination of both techniques can significantly reduce + * the amount of margin required for in-place compression. + * + * In-place compression can work in any buffer + * which size is >= (maxCompressedSize) + * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success. + * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX, + * so it's possible to reduce memory requirements by playing with them. + */ + +#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32) +#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. 
note2: margin is overestimated a bit, since it could use compressedSize instead */ + +#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */ +# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ +#endif + +#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */ +#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */ + +#endif /* LZ4_STATIC_3504398509 */ +#endif /* LZ4_STATIC_LINKING_ONLY */ + + + +#ifndef LZ4_H_98237428734687 +#define LZ4_H_98237428734687 + +/*-************************************************************ + * PRIVATE DEFINITIONS + ************************************************************** + * Do not use these definitions directly. + * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`. + * Accessing members will expose code to API and/or ABI break in future versions of the library. 
+ **************************************************************/ +#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2) +#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE) +#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */ + +#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +#include + +typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; +struct LZ4_stream_t_internal { + uint32_t hashTable[LZ4_HASH_SIZE_U32]; + uint32_t currentOffset; + uint16_t dirty; + uint16_t tableType; + const uint8_t* dictionary; + const LZ4_stream_t_internal* dictCtx; + uint32_t dictSize; +}; + +typedef struct { + const uint8_t* externalDict; + size_t extDictSize; + const uint8_t* prefixEnd; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#else + +typedef struct LZ4_stream_t_internal LZ4_stream_t_internal; +struct LZ4_stream_t_internal { + unsigned int hashTable[LZ4_HASH_SIZE_U32]; + unsigned int currentOffset; + unsigned short dirty; + unsigned short tableType; + const unsigned char* dictionary; + const LZ4_stream_t_internal* dictCtx; + unsigned int dictSize; +}; + +typedef struct { + const unsigned char* externalDict; + const unsigned char* prefixEnd; + size_t extDictSize; + size_t prefixSize; +} LZ4_streamDecode_t_internal; + +#endif + +/*! LZ4_stream_t : + * information structure to track an LZ4 stream. + * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended. + * The structure definition can be convenient for static allocation + * (on stack, or as part of larger structure). + * Init this structure with LZ4_initStream() before first use. + * note : only use this definition in association with static linking ! + * this definition is not API/ABI safe, and may change in a future version. + */ +#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4 + ((sizeof(void*)==16) ? 
4 : 0) /*AS-400*/ ) +#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long)) +union LZ4_stream_u { + unsigned long long table[LZ4_STREAMSIZE_U64]; + LZ4_stream_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_stream_t */ + +/*! LZ4_initStream() : v1.9.0+ + * An LZ4_stream_t structure must be initialized at least once. + * This is automatically done when invoking LZ4_createStream(), + * but it's not when the structure is simply declared on stack (for example). + * + * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t. + * It can also initialize any arbitrary buffer of sufficient size, + * and will @return a pointer of proper type upon initialization. + * + * Note : initialization fails if size and alignment conditions are not respected. + * In which case, the function will @return NULL. + * Note2: An LZ4_stream_t structure guarantees correct alignment and size. + * Note3: Before v1.9.0, use LZ4_resetStream() instead + */ +LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size); + + +/*! LZ4_streamDecode_t : + * information structure to track an LZ4 stream during decompression. + * init this structure using LZ4_setStreamDecode() before first use. + * note : only use in association with static linking ! + * this definition is not API/ABI safe, + * and may change in a future version ! + */ +#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ ) +#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long)) +union LZ4_streamDecode_u { + unsigned long long table[LZ4_STREAMDECODESIZE_U64]; + LZ4_streamDecode_t_internal internal_donotuse; +} ; /* previously typedef'd to LZ4_streamDecode_t */ + + + +/*-************************************ +* Obsolete Functions +**************************************/ + +/*! Deprecation warnings + * + * Deprecated functions make the compiler generate a warning when invoked. 
+ * This is meant to invite users to update their source code. + * Should deprecation warnings be a problem, it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc + * or _CRT_SECURE_NO_WARNINGS in Visual. + * + * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS + * before including the header file. + */ +#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS +# define LZ4_DEPRECATED(message) /* disable deprecation warnings */ +#else +# define LZ4_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define LZ4_DEPRECATED(message) [[deprecated(message)]] +# elif (LZ4_GCC_VERSION >= 405) || defined(__clang__) +# define LZ4_DEPRECATED(message) __attribute__((deprecated(message))) +# elif (LZ4_GCC_VERSION >= 301) +# define LZ4_DEPRECATED(message) __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define LZ4_DEPRECATED(message) __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement LZ4_DEPRECATED for this compiler") +# define LZ4_DEPRECATED(message) +# endif +#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */ + +/* Obsolete compression functions */ +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize); +LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* 
dest, int inputSize); +LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize); + +/* Obsolete decompression functions */ +LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize); +LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize); + +/* Obsolete streaming functions; degraded functionality; do not use! + * + * In order to perform streaming compression, these functions depended on data + * that is no longer tracked in the state. They have been preserved as well as + * possible: using them will still produce a correct output. However, they don't + * actually retain any history between compression calls. The compression ratio + * achieved will therefore be no better than compressing each chunk + * independently. + */ +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void); +LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer); +LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state); + +/* Obsolete streaming decoding functions */ +LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize); +LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize); + +/*! 
LZ4_decompress_fast() : **unsafe!** + * These functions used to be faster than LZ4_decompress_safe(), + * but it has changed, and they are now slower than LZ4_decompress_safe(). + * This is because LZ4_decompress_fast() doesn't know the input size, + * and therefore must progress more cautiously in the input buffer to not read beyond the end of block. + * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability. + * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated. + * + * The last remaining LZ4_decompress_fast() specificity is that + * it can decompress a block without knowing its compressed size. + * Such functionality could be achieved in a more secure manner, + * by also providing the maximum size of input buffer, + * but it would require new prototypes, and adaptation of the implementation to this new use case. + * + * Parameters: + * originalSize : is the uncompressed size to regenerate. + * `dst` must be already allocated, its size must be >= 'originalSize' bytes. + * @return : number of bytes read from source buffer (== compressed size). + * The function expects to finish at block's end exactly. + * If the source stream is detected malformed, the function stops decoding and returns a negative result. + * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer. + * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds. + * Also, since match offsets are not validated, match reads from 'src' may underflow too. + * These issues never happen if input (compressed) data is correct. + * But they may happen if input data is invalid (error or intentional tampering). + * As a consequence, use these functions in trusted environments with trusted data **only**. + */ + +LZ4_DEPRECATED("This function is deprecated and unsafe. 
Consider using LZ4_decompress_safe() instead") +LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead") +LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize); +LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead") +LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize); + +/*! LZ4_resetStream() : + * An LZ4_stream_t structure must be initialized at least once. + * This is done with LZ4_initStream(), or LZ4_resetStream(). + * Consider switching to LZ4_initStream(), + * invoking LZ4_resetStream() will trigger deprecation warnings in the future. + */ +LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr); + + +#endif /* LZ4_H_98237428734687 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/borg/algorithms/msgpack/__init__.py b/src/borg/algorithms/msgpack/__init__.py new file mode 100644 index 0000000000..3a3cf3a5d4 --- /dev/null +++ b/src/borg/algorithms/msgpack/__init__.py @@ -0,0 +1,66 @@ +# coding: utf-8 +from ._version import version +from .exceptions import * + +from collections import namedtuple + + +class ExtType(namedtuple('ExtType', 'code data')): + """ExtType represents ext type in msgpack.""" + def __new__(cls, code, data): + if not isinstance(code, int): + raise TypeError("code must be int") + if not isinstance(data, bytes): + raise TypeError("data must be bytes") + if not 0 <= code <= 127: + raise ValueError("code must be 0~127") + return super(ExtType, cls).__new__(cls, code, data) + + +import os +if os.environ.get('MSGPACK_PUREPYTHON'): + from .fallback import Packer, unpackb, Unpacker +else: + try: + from ._packer import Packer + from ._unpacker import unpackb, Unpacker + except ImportError: + 
from .fallback import Packer, unpackb, Unpacker + + +def pack(o, stream, **kwargs): + """ + Pack object `o` and write it to `stream` + + See :class:`Packer` for options. + """ + packer = Packer(**kwargs) + stream.write(packer.pack(o)) + + +def packb(o, **kwargs): + """ + Pack object `o` and return packed bytes + + See :class:`Packer` for options. + """ + return Packer(**kwargs).pack(o) + + +def unpack(stream, **kwargs): + """ + Unpack an object from `stream`. + + Raises `ExtraData` when `stream` contains extra bytes. + See :class:`Unpacker` for options. + """ + data = stream.read() + return unpackb(data, **kwargs) + + +# alias for compatibility to simplejson/marshal/pickle. +load = unpack +loads = unpackb + +dump = pack +dumps = packb diff --git a/src/borg/algorithms/msgpack/_packer.pyx b/src/borg/algorithms/msgpack/_packer.pyx new file mode 100644 index 0000000000..2aba15105d --- /dev/null +++ b/src/borg/algorithms/msgpack/_packer.pyx @@ -0,0 +1,357 @@ +# coding: utf-8 +#cython: embedsignature=True, c_string_encoding=ascii, language_level=2 + +from cpython cimport * +from cpython.version cimport PY_MAJOR_VERSION +from cpython.exc cimport PyErr_WarnEx + +from .exceptions import PackValueError, PackOverflowError +from . 
import ExtType + + +cdef extern from "Python.h": + + int PyMemoryView_Check(object obj) + int PyByteArray_Check(object obj) + int PyByteArray_CheckExact(object obj) + char* PyUnicode_AsUTF8AndSize(object obj, Py_ssize_t *l) except NULL + + +cdef extern from "pack.h": + struct msgpack_packer: + char* buf + size_t length + size_t buf_size + bint use_bin_type + + int msgpack_pack_int(msgpack_packer* pk, int d) + int msgpack_pack_nil(msgpack_packer* pk) + int msgpack_pack_true(msgpack_packer* pk) + int msgpack_pack_false(msgpack_packer* pk) + int msgpack_pack_long(msgpack_packer* pk, long d) + int msgpack_pack_long_long(msgpack_packer* pk, long long d) + int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d) + int msgpack_pack_float(msgpack_packer* pk, float d) + int msgpack_pack_double(msgpack_packer* pk, double d) + int msgpack_pack_array(msgpack_packer* pk, size_t l) + int msgpack_pack_map(msgpack_packer* pk, size_t l) + int msgpack_pack_raw(msgpack_packer* pk, size_t l) + int msgpack_pack_bin(msgpack_packer* pk, size_t l) + int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l) + int msgpack_pack_ext(msgpack_packer* pk, char typecode, size_t l) + int msgpack_pack_unicode(msgpack_packer* pk, object o, long long limit) + +cdef int DEFAULT_RECURSE_LIMIT=511 +cdef long long ITEM_LIMIT = (2**32)-1 + + +cdef inline int PyBytesLike_Check(object o): + return PyBytes_Check(o) or PyByteArray_Check(o) + + +cdef inline int PyBytesLike_CheckExact(object o): + return PyBytes_CheckExact(o) or PyByteArray_CheckExact(o) + + +cdef class Packer(object): + """ + MessagePack Packer + + usage:: + + packer = Packer() + astream.write(packer.pack(a)) + astream.write(packer.pack(b)) + + Packer's constructor has some keyword arguments: + + :param callable default: + Convert user type to builtin type that Packer supports. + See also simplejson's document. + + :param bool use_single_float: + Use single precision float type for float. 
(default: False) + + :param bool autoreset: + Reset buffer after each pack and return its content as `bytes`. (default: True). + If set this to false, use `bytes()` to get content and `.reset()` to clear buffer. + + :param bool use_bin_type: + Use bin type introduced in msgpack spec 2.0 for bytes. + It also enables str8 type for unicode. + Current default value is false, but it will be changed to true + in future version. You should specify it explicitly. + + :param bool strict_types: + If set to true, types will be checked to be exact. Derived classes + from serializeable types will not be serialized and will be + treated as unsupported type and forwarded to default. + Additionally tuples will not be serialized as lists. + This is useful when trying to implement accurate serialization + for python types. + + :param str unicode_errors: + Error handler for encoding unicode. (default: 'strict') + + :param str encoding: + (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8') + """ + cdef msgpack_packer pk + cdef object _default + cdef object _bencoding + cdef object _berrors + cdef const char *encoding + cdef const char *unicode_errors + cdef bint strict_types + cdef bool use_float + cdef bint autoreset + + def __cinit__(self): + cdef int buf_size = 1024*1024 + self.pk.buf = PyMem_Malloc(buf_size) + if self.pk.buf == NULL: + raise MemoryError("Unable to allocate internal buffer.") + self.pk.buf_size = buf_size + self.pk.length = 0 + + def __init__(self, default=None, encoding=None, unicode_errors=None, + bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + bint strict_types=False): + if encoding is not None: + PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated.", 1) + self.use_float = use_single_float + self.strict_types = strict_types + self.autoreset = autoreset + self.pk.use_bin_type = use_bin_type + if default is not None: + if not PyCallable_Check(default): + raise TypeError("default must be a 
callable.") + self._default = default + + self._bencoding = encoding + if encoding is None: + if PY_MAJOR_VERSION < 3: + self.encoding = 'utf-8' + else: + self.encoding = NULL + else: + self.encoding = self._bencoding + + self._berrors = unicode_errors + if unicode_errors is None: + self.unicode_errors = NULL + else: + self.unicode_errors = self._berrors + + def __dealloc__(self): + PyMem_Free(self.pk.buf) + self.pk.buf = NULL + + cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: + cdef long long llval + cdef unsigned long long ullval + cdef long longval + cdef float fval + cdef double dval + cdef char* rawval + cdef int ret + cdef dict d + cdef Py_ssize_t L + cdef int default_used = 0 + cdef bint strict_types = self.strict_types + cdef Py_buffer view + + if nest_limit < 0: + raise PackValueError("recursion limit exceeded.") + + while True: + if o is None: + ret = msgpack_pack_nil(&self.pk) + elif PyBool_Check(o) if strict_types else isinstance(o, bool): + if o: + ret = msgpack_pack_true(&self.pk) + else: + ret = msgpack_pack_false(&self.pk) + elif PyLong_CheckExact(o) if strict_types else PyLong_Check(o): + # PyInt_Check(long) is True for Python 3. + # So we should test long before int. 
+ try: + if o > 0: + ullval = o + ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) + else: + llval = o + ret = msgpack_pack_long_long(&self.pk, llval) + except OverflowError as oe: + if not default_used and self._default is not None: + o = self._default(o) + default_used = True + continue + else: + raise PackOverflowError("Integer value out of range") + elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): + longval = o + ret = msgpack_pack_long(&self.pk, longval) + elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): + if self.use_float: + fval = o + ret = msgpack_pack_float(&self.pk, fval) + else: + dval = o + ret = msgpack_pack_double(&self.pk, dval) + elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): + L = len(o) + if L > ITEM_LIMIT: + raise PackValueError("%s is too large" % type(o).__name__) + rawval = o + ret = msgpack_pack_bin(&self.pk, L) + if ret == 0: + ret = msgpack_pack_raw_body(&self.pk, rawval, L) + elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + if self.encoding == NULL and self.unicode_errors == NULL: + ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + if ret == -2: + raise PackValueError("unicode string is too large") + else: + o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) + L = len(o) + if L > ITEM_LIMIT: + raise PackValueError("unicode string is too large") + ret = msgpack_pack_raw(&self.pk, L) + if ret == 0: + rawval = o + ret = msgpack_pack_raw_body(&self.pk, rawval, L) + elif PyDict_CheckExact(o): + d = o + L = len(d) + if L > ITEM_LIMIT: + raise PackValueError("dict is too large") + ret = msgpack_pack_map(&self.pk, L) + if ret == 0: + for k, v in d.iteritems(): + ret = self._pack(k, nest_limit-1) + if ret != 0: break + ret = self._pack(v, nest_limit-1) + if ret != 0: break + elif not strict_types and PyDict_Check(o): + L = len(o) + if L > ITEM_LIMIT: + raise PackValueError("dict is too large") + ret = msgpack_pack_map(&self.pk, L) + if ret 
== 0: + for k, v in o.items(): + ret = self._pack(k, nest_limit-1) + if ret != 0: break + ret = self._pack(v, nest_limit-1) + if ret != 0: break + elif type(o) is ExtType if strict_types else isinstance(o, ExtType): + # This should be before Tuple because ExtType is namedtuple. + longval = o.code + rawval = o.data + L = len(o.data) + if L > ITEM_LIMIT: + raise PackValueError("EXT data is too large") + ret = msgpack_pack_ext(&self.pk, longval, L) + ret = msgpack_pack_raw_body(&self.pk, rawval, L) + elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): + L = len(o) + if L > ITEM_LIMIT: + raise PackValueError("list is too large") + ret = msgpack_pack_array(&self.pk, L) + if ret == 0: + for v in o: + ret = self._pack(v, nest_limit-1) + if ret != 0: break + elif PyMemoryView_Check(o): + if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + raise PackValueError("could not get buffer for memoryview") + L = view.len + if L > ITEM_LIMIT: + PyBuffer_Release(&view); + raise PackValueError("memoryview is too large") + ret = msgpack_pack_bin(&self.pk, L) + if ret == 0: + ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + PyBuffer_Release(&view); + elif not default_used and self._default: + o = self._default(o) + default_used = 1 + continue + else: + raise TypeError("can't serialize %r" % (o,)) + return ret + + cpdef pack(self, object obj): + cdef int ret + try: + ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) + except: + self.pk.length = 0 + raise + if ret: # should not happen. 
+ raise RuntimeError("internal error") + if self.autoreset: + buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + self.pk.length = 0 + return buf + + def pack_ext_type(self, typecode, data): + msgpack_pack_ext(&self.pk, typecode, len(data)) + msgpack_pack_raw_body(&self.pk, data, len(data)) + + def pack_array_header(self, long long size): + if size > ITEM_LIMIT: + raise PackValueError + cdef int ret = msgpack_pack_array(&self.pk, size) + if ret == -1: + raise MemoryError + elif ret: # should not happen + raise TypeError + if self.autoreset: + buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + self.pk.length = 0 + return buf + + def pack_map_header(self, long long size): + if size > ITEM_LIMIT: + raise PackValueError + cdef int ret = msgpack_pack_map(&self.pk, size) + if ret == -1: + raise MemoryError + elif ret: # should not happen + raise TypeError + if self.autoreset: + buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + self.pk.length = 0 + return buf + + def pack_map_pairs(self, object pairs): + """ + Pack *pairs* as msgpack map type. + + *pairs* should be a sequence of pairs. + (`len(pairs)` and `for k, v in pairs:` should be supported.) 
+ """ + cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) + if ret == 0: + for k, v in pairs: + ret = self._pack(k) + if ret != 0: break + ret = self._pack(v) + if ret != 0: break + if ret == -1: + raise MemoryError + elif ret: # should not happen + raise TypeError + if self.autoreset: + buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + self.pk.length = 0 + return buf + + def reset(self): + """Clear internal buffer.""" + self.pk.length = 0 + + def bytes(self): + """Return buffer content.""" + return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) diff --git a/src/borg/algorithms/msgpack/_unpacker.pyx b/src/borg/algorithms/msgpack/_unpacker.pyx new file mode 100644 index 0000000000..ec3ee7ca17 --- /dev/null +++ b/src/borg/algorithms/msgpack/_unpacker.pyx @@ -0,0 +1,527 @@ +# coding: utf-8 +#cython: embedsignature=True, c_string_encoding=ascii, language_level=2 + +from cpython.version cimport PY_MAJOR_VERSION +from cpython.bytes cimport ( + PyBytes_AsString, + PyBytes_FromStringAndSize, + PyBytes_Size, +) +from cpython.buffer cimport ( + Py_buffer, + PyObject_CheckBuffer, + PyObject_GetBuffer, + PyBuffer_Release, + PyBuffer_IsContiguous, + PyBUF_READ, + PyBUF_SIMPLE, + PyBUF_FULL_RO, +) +from cpython.mem cimport PyMem_Malloc, PyMem_Free +from cpython.object cimport PyCallable_Check +from cpython.ref cimport Py_DECREF +from cpython.exc cimport PyErr_WarnEx + +cdef extern from "Python.h": + ctypedef struct PyObject + cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1 + object PyMemoryView_GetContiguous(object obj, int buffertype, char order) + +from libc.stdlib cimport * +from libc.string cimport * +from libc.limits cimport * +ctypedef unsigned long long uint64_t + +from .exceptions import ( + BufferFull, + OutOfData, + UnpackValueError, + ExtraData, +) +from . 
import ExtType + + +cdef extern from "unpack.h": + ctypedef struct msgpack_user: + bint use_list + bint raw + bint has_pairs_hook # call object_hook with k-v pairs + PyObject* object_hook + PyObject* list_hook + PyObject* ext_hook + char *encoding + char *unicode_errors + Py_ssize_t max_str_len + Py_ssize_t max_bin_len + Py_ssize_t max_array_len + Py_ssize_t max_map_len + Py_ssize_t max_ext_len + + ctypedef struct unpack_context: + msgpack_user user + PyObject* obj + Py_ssize_t count + + ctypedef int (*execute_fn)(unpack_context* ctx, const char* data, + Py_ssize_t len, Py_ssize_t* off) except? -1 + execute_fn unpack_construct + execute_fn unpack_skip + execute_fn read_array_header + execute_fn read_map_header + void unpack_init(unpack_context* ctx) + object unpack_data(unpack_context* ctx) + void unpack_clear(unpack_context* ctx) + +cdef inline init_ctx(unpack_context *ctx, + object object_hook, object object_pairs_hook, + object list_hook, object ext_hook, + bint use_list, bint raw, + const char* encoding, const char* unicode_errors, + Py_ssize_t max_str_len, Py_ssize_t max_bin_len, + Py_ssize_t max_array_len, Py_ssize_t max_map_len, + Py_ssize_t max_ext_len): + unpack_init(ctx) + ctx.user.use_list = use_list + ctx.user.raw = raw + ctx.user.object_hook = ctx.user.list_hook = NULL + ctx.user.max_str_len = max_str_len + ctx.user.max_bin_len = max_bin_len + ctx.user.max_array_len = max_array_len + ctx.user.max_map_len = max_map_len + ctx.user.max_ext_len = max_ext_len + + if object_hook is not None and object_pairs_hook is not None: + raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") + + if object_hook is not None: + if not PyCallable_Check(object_hook): + raise TypeError("object_hook must be a callable.") + ctx.user.object_hook = object_hook + + if object_pairs_hook is None: + ctx.user.has_pairs_hook = False + else: + if not PyCallable_Check(object_pairs_hook): + raise TypeError("object_pairs_hook must be a callable.") + 
ctx.user.object_hook = object_pairs_hook + ctx.user.has_pairs_hook = True + + if list_hook is not None: + if not PyCallable_Check(list_hook): + raise TypeError("list_hook must be a callable.") + ctx.user.list_hook = list_hook + + if ext_hook is not None: + if not PyCallable_Check(ext_hook): + raise TypeError("ext_hook must be a callable.") + ctx.user.ext_hook = ext_hook + + ctx.user.encoding = encoding + ctx.user.unicode_errors = unicode_errors + +def default_read_extended_type(typecode, data): + raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + +cdef inline int get_data_from_buffer(object obj, + Py_buffer *view, + char **buf, + Py_ssize_t *buffer_len) except 0: + cdef object contiguous + cdef Py_buffer tmp + if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + raise + if view.itemsize != 1: + PyBuffer_Release(view) + raise BufferError("cannot unpack from multi-byte object") + if PyBuffer_IsContiguous(view, b'A') == 0: + PyBuffer_Release(view) + # create a contiguous copy and get buffer + contiguous = PyMemoryView_GetContiguous(obj, PyBUF_READ, b'C') + PyObject_GetBuffer(contiguous, view, PyBUF_SIMPLE) + # view must hold the only reference to contiguous, + # so memory is freed when view is released + Py_DECREF(contiguous) + buffer_len[0] = view.len + buf[0] = view.buf + return 1 + + +def unpackb(object packed, object object_hook=None, object list_hook=None, + bint use_list=True, bint raw=True, + encoding=None, unicode_errors=None, + object_pairs_hook=None, ext_hook=ExtType, + Py_ssize_t max_str_len=2147483647, # 2**32-1 + Py_ssize_t max_bin_len=2147483647, + Py_ssize_t max_array_len=2147483647, + Py_ssize_t max_map_len=2147483647, + Py_ssize_t max_ext_len=2147483647): + """ + Unpack packed_bytes to object. Returns an unpacked object. + + Raises `ValueError` when `packed` contains extra bytes. + + See :class:`Unpacker` for options. 
+ """ + cdef unpack_context ctx + cdef Py_ssize_t off = 0 + cdef int ret + + cdef Py_buffer view + cdef char* buf = NULL + cdef Py_ssize_t buf_len + cdef const char* cenc = NULL + cdef const char* cerr = NULL + + if encoding is not None: + PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + cenc = encoding + + if unicode_errors is not None: + cerr = unicode_errors + + get_data_from_buffer(packed, &view, &buf, &buf_len) + try: + init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook, + use_list, raw, cenc, cerr, + max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len) + ret = unpack_construct(&ctx, buf, buf_len, &off) + finally: + PyBuffer_Release(&view); + + if ret == 1: + obj = unpack_data(&ctx) + if off < buf_len: + raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) + return obj + unpack_clear(&ctx) + raise UnpackValueError("Unpack failed: error = %d" % (ret,)) + + +def unpack(object stream, **kwargs): + PyErr_WarnEx( + PendingDeprecationWarning, + "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", 1) + data = stream.read() + return unpackb(data, **kwargs) + + +cdef class Unpacker(object): + """Streaming unpacker. + + arguments: + + :param file_like: + File-like object having `.read(n)` method. + If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. + + :param int read_size: + Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`) + + :param bool use_list: + If true, unpack msgpack array to Python list. + Otherwise, unpack to Python tuple. (default: True) + + :param bool raw: + If true, unpack msgpack raw to Python bytes (default). + Otherwise, unpack to Python str (or unicode on Python 2) by decoding + with UTF-8 encoding (recommended). + Currently, the default is true, but it will be changed to false in + near future. 
So you must specify it explicitly for keeping backward + compatibility. + + *encoding* option which is deprecated overrides this option. + + :param callable object_hook: + When specified, it should be callable. + Unpacker calls it with a dict argument after unpacking msgpack map. + (See also simplejson) + + :param callable object_pairs_hook: + When specified, it should be callable. + Unpacker calls it with a list of key-value pairs after unpacking msgpack map. + (See also simplejson) + + :param int max_buffer_size: + Limits size of data waiting unpacked. 0 means system's INT_MAX (default). + Raises `BufferFull` exception when it is insufficient. + You should set this parameter when unpacking data from untrusted source. + + :param int max_str_len: + Limits max length of str. (default: 2**31-1) + + :param int max_bin_len: + Limits max length of bin. (default: 2**31-1) + + :param int max_array_len: + Limits max length of array. (default: 2**31-1) + + :param int max_map_len: + Limits max length of map. (default: 2**31-1) + + :param str encoding: + Deprecated, use raw instead. + Encoding used for decoding msgpack raw. + If it is None (default), msgpack raw is deserialized to Python bytes. + + :param str unicode_errors: + Error handler used for decoding str type. (default: `'strict'`) + + + Example of streaming deserialize from file-like object:: + + unpacker = Unpacker(file_like, raw=False) + for o in unpacker: + process(o) + + Example of streaming deserialize from socket:: + + unpacker = Unpacker(raw=False) + while True: + buf = sock.recv(1024**2) + if not buf: + break + unpacker.feed(buf) + for o in unpacker: + process(o) + """ + cdef unpack_context ctx + cdef char* buf + cdef Py_ssize_t buf_size, buf_head, buf_tail + cdef object file_like + cdef object file_like_read + cdef Py_ssize_t read_size + # To maintain refcnt. 
+ cdef object object_hook, object_pairs_hook, list_hook, ext_hook + cdef object encoding, unicode_errors + cdef Py_ssize_t max_buffer_size + cdef uint64_t stream_offset + + def __cinit__(self): + self.buf = NULL + + def __dealloc__(self): + PyMem_Free(self.buf) + self.buf = NULL + + def __init__(self, file_like=None, Py_ssize_t read_size=0, + bint use_list=True, bint raw=True, + object object_hook=None, object object_pairs_hook=None, object list_hook=None, + encoding=None, unicode_errors=None, int max_buffer_size=0, + object ext_hook=ExtType, + Py_ssize_t max_str_len=2147483647, # 2**32-1 + Py_ssize_t max_bin_len=2147483647, + Py_ssize_t max_array_len=2147483647, + Py_ssize_t max_map_len=2147483647, + Py_ssize_t max_ext_len=2147483647): + cdef const char *cenc=NULL, + cdef const char *cerr=NULL + + self.object_hook = object_hook + self.object_pairs_hook = object_pairs_hook + self.list_hook = list_hook + self.ext_hook = ext_hook + + self.file_like = file_like + if file_like: + self.file_like_read = file_like.read + if not PyCallable_Check(self.file_like_read): + raise TypeError("`file_like.read` must be a callable.") + if not max_buffer_size: + max_buffer_size = INT_MAX + if read_size > max_buffer_size: + raise ValueError("read_size should be less or equal to max_buffer_size") + if not read_size: + read_size = min(max_buffer_size, 1024**2) + self.max_buffer_size = max_buffer_size + self.read_size = read_size + self.buf = PyMem_Malloc(read_size) + if self.buf == NULL: + raise MemoryError("Unable to allocate internal buffer.") + self.buf_size = read_size + self.buf_head = 0 + self.buf_tail = 0 + self.stream_offset = 0 + + if encoding is not None: + PyErr_WarnEx(PendingDeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + self.encoding = encoding + cenc = encoding + + if unicode_errors is not None: + self.unicode_errors = unicode_errors + cerr = unicode_errors + + init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, + ext_hook, use_list, 
raw, cenc, cerr, + max_str_len, max_bin_len, max_array_len, + max_map_len, max_ext_len) + + def feed(self, object next_bytes): + """Append `next_bytes` to internal buffer.""" + cdef Py_buffer pybuff + cdef char* buf + cdef Py_ssize_t buf_len + + if self.file_like is not None: + raise AssertionError( + "unpacker.feed() is not be able to use with `file_like`.") + + get_data_from_buffer(next_bytes, &pybuff, &buf, &buf_len) + try: + self.append_buffer(buf, buf_len) + finally: + PyBuffer_Release(&pybuff) + + cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): + cdef: + char* buf = self.buf + char* new_buf + Py_ssize_t head = self.buf_head + Py_ssize_t tail = self.buf_tail + Py_ssize_t buf_size = self.buf_size + Py_ssize_t new_size + + if tail + _buf_len > buf_size: + if ((tail - head) + _buf_len) <= buf_size: + # move to front. + memmove(buf, buf + head, tail - head) + tail -= head + head = 0 + else: + # expand buffer. + new_size = (tail-head) + _buf_len + if new_size > self.max_buffer_size: + raise BufferFull + new_size = min(new_size*2, self.max_buffer_size) + new_buf = PyMem_Malloc(new_size) + if new_buf == NULL: + # self.buf still holds old buffer and will be freed during + # obj destruction + raise MemoryError("Unable to enlarge internal buffer.") + memcpy(new_buf, buf + head, tail - head) + PyMem_Free(buf) + + buf = new_buf + buf_size = new_size + tail -= head + head = 0 + + memcpy(buf + tail, (_buf), _buf_len) + self.buf = buf + self.buf_head = head + self.buf_size = buf_size + self.buf_tail = tail + _buf_len + + cdef read_from_file(self): + next_bytes = self.file_like_read( + min(self.read_size, + self.max_buffer_size - (self.buf_tail - self.buf_head) + )) + if next_bytes: + self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) + else: + self.file_like = None + + cdef object _unpack(self, execute_fn execute, object write_bytes, bint iter=0): + cdef int ret + cdef object obj + cdef Py_ssize_t prev_head + + if write_bytes is not None: + 
PyErr_WarnEx(DeprecationWarning, "`write_bytes` option is deprecated. Use `.tell()` instead.", 1) + + if self.buf_head >= self.buf_tail and self.file_like is not None: + self.read_from_file() + + while 1: + prev_head = self.buf_head + if prev_head >= self.buf_tail: + if iter: + raise StopIteration("No more data to unpack.") + else: + raise OutOfData("No more data to unpack.") + + try: + ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head) + self.stream_offset += self.buf_head - prev_head + if write_bytes is not None: + write_bytes(PyBytes_FromStringAndSize(self.buf + prev_head, self.buf_head - prev_head)) + + if ret == 1: + obj = unpack_data(&self.ctx) + unpack_init(&self.ctx) + return obj + elif ret == 0: + if self.file_like is not None: + self.read_from_file() + continue + if iter: + raise StopIteration("No more data to unpack.") + else: + raise OutOfData("No more data to unpack.") + else: + raise UnpackValueError("Unpack failed: error = %d" % (ret,)) + except ValueError as e: + raise UnpackValueError(e) + + def read_bytes(self, Py_ssize_t nbytes): + """Read a specified number of raw bytes from the stream""" + cdef Py_ssize_t nread + nread = min(self.buf_tail - self.buf_head, nbytes) + ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) + self.buf_head += nread + if len(ret) < nbytes and self.file_like is not None: + ret += self.file_like.read(nbytes - len(ret)) + return ret + + def unpack(self, object write_bytes=None): + """Unpack one object + + If write_bytes is not None, it will be called with parts of the raw + message as it is unpacked. + + Raises `OutOfData` when there are no more bytes to unpack. + """ + return self._unpack(unpack_construct, write_bytes) + + def skip(self, object write_bytes=None): + """Read and ignore one object, returning None + + If write_bytes is not None, it will be called with parts of the raw + message as it is unpacked. + + Raises `OutOfData` when there are no more bytes to unpack. 
+ """ + return self._unpack(unpack_skip, write_bytes) + + def read_array_header(self, object write_bytes=None): + """assuming the next object is an array, return its size n, such that + the next n unpack() calls will iterate over its contents. + + Raises `OutOfData` when there are no more bytes to unpack. + """ + return self._unpack(read_array_header, write_bytes) + + def read_map_header(self, object write_bytes=None): + """assuming the next object is a map, return its size n, such that the + next n * 2 unpack() calls will iterate over its key-value pairs. + + Raises `OutOfData` when there are no more bytes to unpack. + """ + return self._unpack(read_map_header, write_bytes) + + def tell(self): + return self.stream_offset + + def __iter__(self): + return self + + def __next__(self): + return self._unpack(unpack_construct, None, 1) + + # for debug. + #def _buf(self): + # return PyString_FromStringAndSize(self.buf, self.buf_tail) + + #def _off(self): + # return self.buf_head diff --git a/src/borg/algorithms/msgpack/_version.py b/src/borg/algorithms/msgpack/_version.py new file mode 100644 index 0000000000..7bd3b0dd51 --- /dev/null +++ b/src/borg/algorithms/msgpack/_version.py @@ -0,0 +1,6 @@ +# This is a bundled msgpack 0.5.6 with local modifications. +# Changes: +# +borg1: drop support for old buffer protocol to be compatible with py310 +# (backport of commit 9ae43709e42092c7f6a4e990d696d9005fa1623d) +version = (0, 5, 6, '+borg1') + diff --git a/src/borg/algorithms/msgpack/exceptions.py b/src/borg/algorithms/msgpack/exceptions.py new file mode 100644 index 0000000000..97668814f2 --- /dev/null +++ b/src/borg/algorithms/msgpack/exceptions.py @@ -0,0 +1,41 @@ +class UnpackException(Exception): + """Deprecated. Use Exception instead to catch all exception during unpacking.""" + + +class BufferFull(UnpackException): + pass + + +class OutOfData(UnpackException): + pass + + +class UnpackValueError(UnpackException, ValueError): + """Deprecated. 
Use ValueError instead.""" + + +class ExtraData(UnpackValueError): + def __init__(self, unpacked, extra): + self.unpacked = unpacked + self.extra = extra + + def __str__(self): + return "unpack(b) received extra data." + + +class PackException(Exception): + """Deprecated. Use Exception instead to catch all exception during packing.""" + + +class PackValueError(PackException, ValueError): + """PackValueError is raised when type of input data is supported but it's value is unsupported. + + Deprecated. Use ValueError instead. + """ + + +class PackOverflowError(PackValueError, OverflowError): + """PackOverflowError is raised when integer value is out of range of msgpack support [-2**31, 2**32). + + Deprecated. Use ValueError instead. + """ diff --git a/src/borg/algorithms/msgpack/fallback.py b/src/borg/algorithms/msgpack/fallback.py new file mode 100644 index 0000000000..fd01bf8257 --- /dev/null +++ b/src/borg/algorithms/msgpack/fallback.py @@ -0,0 +1,971 @@ +"""Fallback pure Python implementation of msgpack""" + +import sys +import struct +import warnings + +if sys.version_info[0] == 3: + PY3 = True + int_types = int + Unicode = str + xrange = range + def dict_iteritems(d): + return d.items() +else: + PY3 = False + int_types = (int, long) + Unicode = unicode + def dict_iteritems(d): + return d.iteritems() + + +if hasattr(sys, 'pypy_version_info'): + # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own + # StringBuilder is fastest. 
+ from __pypy__ import newlist_hint + try: + from __pypy__.builders import BytesBuilder as StringBuilder + except ImportError: + from __pypy__.builders import StringBuilder + USING_STRINGBUILDER = True + class StringIO(object): + def __init__(self, s=b''): + if s: + self.builder = StringBuilder(len(s)) + self.builder.append(s) + else: + self.builder = StringBuilder() + def write(self, s): + if isinstance(s, memoryview): + s = s.tobytes() + elif isinstance(s, bytearray): + s = bytes(s) + self.builder.append(s) + def getvalue(self): + return self.builder.build() +else: + USING_STRINGBUILDER = False + from io import BytesIO as StringIO + newlist_hint = lambda size: [] + + +from .exceptions import ( + BufferFull, + OutOfData, + UnpackValueError, + PackValueError, + PackOverflowError, + ExtraData) + +from . import ExtType + + +EX_SKIP = 0 +EX_CONSTRUCT = 1 +EX_READ_ARRAY_HEADER = 2 +EX_READ_MAP_HEADER = 3 + +TYPE_IMMEDIATE = 0 +TYPE_ARRAY = 1 +TYPE_MAP = 2 +TYPE_RAW = 3 +TYPE_BIN = 4 +TYPE_EXT = 5 + +DEFAULT_RECURSE_LIMIT = 511 + + +def _check_type_strict(obj, t, type=type, tuple=tuple): + if type(t) is tuple: + return type(obj) in t + else: + return type(obj) is t + + +def _get_data_from_buffer(obj): + try: + view = memoryview(obj) + except TypeError: + # try to use legacy buffer protocol if 2.7, otherwise re-raise + if not PY3: + view = memoryview(buffer(obj)) + warnings.warn("using old buffer interface to unpack %s; " + "this leads to unpacking errors if slicing is used and " + "will be removed in a future version" % type(obj), + RuntimeWarning) + else: + raise + if view.itemsize != 1: + raise ValueError("cannot unpack from multi-byte object") + return view + + +def unpack(stream, **kwargs): + warnings.warn( + "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", + PendingDeprecationWarning) + data = stream.read() + return unpackb(data, **kwargs) + + +def unpackb(packed, **kwargs): + """ + Unpack an object from 
`packed`. + + Raises `ExtraData` when `packed` contains extra bytes. + See :class:`Unpacker` for options. + """ + unpacker = Unpacker(None, **kwargs) + unpacker.feed(packed) + try: + ret = unpacker._unpack() + except OutOfData: + raise UnpackValueError("Data is not enough.") + if unpacker._got_extradata(): + raise ExtraData(ret, unpacker._get_extradata()) + return ret + + +class Unpacker(object): + """Streaming unpacker. + + arguments: + + :param file_like: + File-like object having `.read(n)` method. + If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable. + + :param int read_size: + Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`) + + :param bool use_list: + If true, unpack msgpack array to Python list. + Otherwise, unpack to Python tuple. (default: True) + + :param bool raw: + If true, unpack msgpack raw to Python bytes (default). + Otherwise, unpack to Python str (or unicode on Python 2) by decoding + with UTF-8 encoding (recommended). + Currently, the default is true, but it will be changed to false in + near future. So you must specify it explicitly for keeping backward + compatibility. + + *encoding* option which is deprecated overrides this option. + + :param callable object_hook: + When specified, it should be callable. + Unpacker calls it with a dict argument after unpacking msgpack map. + (See also simplejson) + + :param callable object_pairs_hook: + When specified, it should be callable. + Unpacker calls it with a list of key-value pairs after unpacking msgpack map. + (See also simplejson) + + :param str encoding: + Encoding used for decoding msgpack raw. + If it is None (default), msgpack raw is deserialized to Python bytes. + + :param str unicode_errors: + (deprecated) Used for decoding msgpack raw with *encoding*. + (default: `'strict'`) + + :param int max_buffer_size: + Limits size of data waiting unpacked. 0 means system's INT_MAX (default). 
+        Raises `BufferFull` exception when it is insufficient.
+        You should set this parameter when unpacking data from untrusted source.
+
+    :param int max_str_len:
+        Limits max length of str. (default: 2**31-1)
+
+    :param int max_bin_len:
+        Limits max length of bin. (default: 2**31-1)
+
+    :param int max_array_len:
+        Limits max length of array. (default: 2**31-1)
+
+    :param int max_map_len:
+        Limits max length of map. (default: 2**31-1)
+
+
+    example of streaming deserialize from file-like object::
+
+        unpacker = Unpacker(file_like, raw=False)
+        for o in unpacker:
+            process(o)
+
+    example of streaming deserialize from socket::
+
+        unpacker = Unpacker(raw=False)
+        while True:
+            buf = sock.recv(1024**2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                process(o)
+    """
+
+    def __init__(self, file_like=None, read_size=0, use_list=True, raw=True,
+                 object_hook=None, object_pairs_hook=None, list_hook=None,
+                 encoding=None, unicode_errors=None, max_buffer_size=0,
+                 ext_hook=ExtType,
+                 max_str_len=2147483647, # 2**31-1
+                 max_bin_len=2147483647,
+                 max_array_len=2147483647,
+                 max_map_len=2147483647,
+                 max_ext_len=2147483647):
+
+        if encoding is not None:
+            warnings.warn(
+                "encoding is deprecated, Use raw=False instead.",
+                PendingDeprecationWarning)
+
+        if unicode_errors is None:
+            unicode_errors = 'strict'
+
+        if file_like is None:
+            self._feeding = True
+        else:
+            if not callable(file_like.read):
+                raise TypeError("`file_like.read` must be callable")
+            self.file_like = file_like
+            self._feeding = False
+
+        #: array of bytes fed.
+        self._buffer = bytearray()
+        #: Which position we currently reads
+        self._buff_i = 0
+
+        # When Unpacker is used as an iterable, between the calls to next(),
+        # the buffer is not "consumed" completely, for efficiency sake.
+        # Instead, it is done sloppily. To make sure we raise BufferFull at
+        # the correct moments, we have to keep track of how sloppy we were.
+ # Furthermore, when the buffer is incomplete (that is: in the case + # we raise an OutOfData) we need to rollback the buffer to the correct + # state, which _buf_checkpoint records. + self._buf_checkpoint = 0 + + self._max_buffer_size = max_buffer_size or 2**31-1 + if read_size > self._max_buffer_size: + raise ValueError("read_size must be smaller than max_buffer_size") + self._read_size = read_size or min(self._max_buffer_size, 16*1024) + self._raw = bool(raw) + self._encoding = encoding + self._unicode_errors = unicode_errors + self._use_list = use_list + self._list_hook = list_hook + self._object_hook = object_hook + self._object_pairs_hook = object_pairs_hook + self._ext_hook = ext_hook + self._max_str_len = max_str_len + self._max_bin_len = max_bin_len + self._max_array_len = max_array_len + self._max_map_len = max_map_len + self._max_ext_len = max_ext_len + self._stream_offset = 0 + + if list_hook is not None and not callable(list_hook): + raise TypeError('`list_hook` is not callable') + if object_hook is not None and not callable(object_hook): + raise TypeError('`object_hook` is not callable') + if object_pairs_hook is not None and not callable(object_pairs_hook): + raise TypeError('`object_pairs_hook` is not callable') + if object_hook is not None and object_pairs_hook is not None: + raise TypeError("object_pairs_hook and object_hook are mutually " + "exclusive") + if not callable(ext_hook): + raise TypeError("`ext_hook` is not callable") + + def feed(self, next_bytes): + assert self._feeding + view = _get_data_from_buffer(next_bytes) + if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size): + raise BufferFull + + # Strip buffer before checkpoint before reading file. + if self._buf_checkpoint > 0: + del self._buffer[:self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + self._buffer += view + + def _consume(self): + """ Gets rid of the used parts of the buffer. 
""" + self._stream_offset += self._buff_i - self._buf_checkpoint + self._buf_checkpoint = self._buff_i + + def _got_extradata(self): + return self._buff_i < len(self._buffer) + + def _get_extradata(self): + return self._buffer[self._buff_i:] + + def read_bytes(self, n): + return self._read(n) + + def _read(self, n): + # (int) -> bytearray + self._reserve(n) + i = self._buff_i + self._buff_i = i+n + return self._buffer[i:i+n] + + def _reserve(self, n): + remain_bytes = len(self._buffer) - self._buff_i - n + + # Fast path: buffer has n bytes already + if remain_bytes >= 0: + return + + if self._feeding: + self._buff_i = self._buf_checkpoint + raise OutOfData + + # Strip buffer before checkpoint before reading file. + if self._buf_checkpoint > 0: + del self._buffer[:self._buf_checkpoint] + self._buff_i -= self._buf_checkpoint + self._buf_checkpoint = 0 + + # Read from file + remain_bytes = -remain_bytes + while remain_bytes > 0: + to_read_bytes = max(self._read_size, remain_bytes) + read_data = self.file_like.read(to_read_bytes) + if not read_data: + break + assert isinstance(read_data, bytes) + self._buffer += read_data + remain_bytes -= len(read_data) + + if len(self._buffer) < n + self._buff_i: + self._buff_i = 0 # rollback + raise OutOfData + + def _read_header(self, execute=EX_CONSTRUCT): + typ = TYPE_IMMEDIATE + n = 0 + obj = None + self._reserve(1) + b = self._buffer[self._buff_i] + self._buff_i += 1 + if b & 0b10000000 == 0: + obj = b + elif b & 0b11100000 == 0b11100000: + obj = -1 - (b ^ 0xff) + elif b & 0b11100000 == 0b10100000: + n = b & 0b00011111 + typ = TYPE_RAW + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b & 0b11110000 == 0b10010000: + n = b & 0b00001111 + typ = TYPE_ARRAY + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b & 0b11110000 == 0b10000000: + n = b & 0b00001111 + typ = TYPE_MAP + if n > 
self._max_map_len: + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + elif b == 0xc0: + obj = None + elif b == 0xc2: + obj = False + elif b == 0xc3: + obj = True + elif b == 0xc4: + typ = TYPE_BIN + self._reserve(1) + n = self._buffer[self._buff_i] + self._buff_i += 1 + if n > self._max_bin_len: + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) + elif b == 0xc5: + typ = TYPE_BIN + self._reserve(2) + n = struct.unpack_from(">H", self._buffer, self._buff_i)[0] + self._buff_i += 2 + if n > self._max_bin_len: + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) + elif b == 0xc6: + typ = TYPE_BIN + self._reserve(4) + n = struct.unpack_from(">I", self._buffer, self._buff_i)[0] + self._buff_i += 4 + if n > self._max_bin_len: + raise UnpackValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len)) + obj = self._read(n) + elif b == 0xc7: # ext 8 + typ = TYPE_EXT + self._reserve(2) + L, n = struct.unpack_from('Bb', self._buffer, self._buff_i) + self._buff_i += 2 + if L > self._max_ext_len: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) + elif b == 0xc8: # ext 16 + typ = TYPE_EXT + self._reserve(3) + L, n = struct.unpack_from('>Hb', self._buffer, self._buff_i) + self._buff_i += 3 + if L > self._max_ext_len: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) + elif b == 0xc9: # ext 32 + typ = TYPE_EXT + self._reserve(5) + L, n = struct.unpack_from('>Ib', self._buffer, self._buff_i) + self._buff_i += 5 + if L > self._max_ext_len: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len)) + obj = self._read(L) + elif b == 0xca: + self._reserve(4) + obj = struct.unpack_from(">f", self._buffer, self._buff_i)[0] + self._buff_i += 4 + elif b == 0xcb: + self._reserve(8) + obj = struct.unpack_from(">d", self._buffer, 
self._buff_i)[0] + self._buff_i += 8 + elif b == 0xcc: + self._reserve(1) + obj = self._buffer[self._buff_i] + self._buff_i += 1 + elif b == 0xcd: + self._reserve(2) + obj = struct.unpack_from(">H", self._buffer, self._buff_i)[0] + self._buff_i += 2 + elif b == 0xce: + self._reserve(4) + obj = struct.unpack_from(">I", self._buffer, self._buff_i)[0] + self._buff_i += 4 + elif b == 0xcf: + self._reserve(8) + obj = struct.unpack_from(">Q", self._buffer, self._buff_i)[0] + self._buff_i += 8 + elif b == 0xd0: + self._reserve(1) + obj = struct.unpack_from("b", self._buffer, self._buff_i)[0] + self._buff_i += 1 + elif b == 0xd1: + self._reserve(2) + obj = struct.unpack_from(">h", self._buffer, self._buff_i)[0] + self._buff_i += 2 + elif b == 0xd2: + self._reserve(4) + obj = struct.unpack_from(">i", self._buffer, self._buff_i)[0] + self._buff_i += 4 + elif b == 0xd3: + self._reserve(8) + obj = struct.unpack_from(">q", self._buffer, self._buff_i)[0] + self._buff_i += 8 + elif b == 0xd4: # fixext 1 + typ = TYPE_EXT + if self._max_ext_len < 1: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len)) + self._reserve(2) + n, obj = struct.unpack_from("b1s", self._buffer, self._buff_i) + self._buff_i += 2 + elif b == 0xd5: # fixext 2 + typ = TYPE_EXT + if self._max_ext_len < 2: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len)) + self._reserve(3) + n, obj = struct.unpack_from("b2s", self._buffer, self._buff_i) + self._buff_i += 3 + elif b == 0xd6: # fixext 4 + typ = TYPE_EXT + if self._max_ext_len < 4: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len)) + self._reserve(5) + n, obj = struct.unpack_from("b4s", self._buffer, self._buff_i) + self._buff_i += 5 + elif b == 0xd7: # fixext 8 + typ = TYPE_EXT + if self._max_ext_len < 8: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len)) + self._reserve(9) + n, obj = struct.unpack_from("b8s", self._buffer, self._buff_i) + 
self._buff_i += 9 + elif b == 0xd8: # fixext 16 + typ = TYPE_EXT + if self._max_ext_len < 16: + raise UnpackValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len)) + self._reserve(17) + n, obj = struct.unpack_from("b16s", self._buffer, self._buff_i) + self._buff_i += 17 + elif b == 0xd9: + typ = TYPE_RAW + self._reserve(1) + n = self._buffer[self._buff_i] + self._buff_i += 1 + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b == 0xda: + typ = TYPE_RAW + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer, self._buff_i) + self._buff_i += 2 + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b == 0xdb: + typ = TYPE_RAW + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer, self._buff_i) + self._buff_i += 4 + if n > self._max_str_len: + raise UnpackValueError("%s exceeds max_str_len(%s)", n, self._max_str_len) + obj = self._read(n) + elif b == 0xdc: + typ = TYPE_ARRAY + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer, self._buff_i) + self._buff_i += 2 + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b == 0xdd: + typ = TYPE_ARRAY + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer, self._buff_i) + self._buff_i += 4 + if n > self._max_array_len: + raise UnpackValueError("%s exceeds max_array_len(%s)", n, self._max_array_len) + elif b == 0xde: + self._reserve(2) + n, = struct.unpack_from(">H", self._buffer, self._buff_i) + self._buff_i += 2 + if n > self._max_map_len: + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + typ = TYPE_MAP + elif b == 0xdf: + self._reserve(4) + n, = struct.unpack_from(">I", self._buffer, self._buff_i) + self._buff_i += 4 + if n > self._max_map_len: + raise UnpackValueError("%s exceeds max_map_len(%s)", n, self._max_map_len) + typ 
= TYPE_MAP + else: + raise UnpackValueError("Unknown header: 0x%x" % b) + return typ, n, obj + + def _unpack(self, execute=EX_CONSTRUCT): + typ, n, obj = self._read_header(execute) + + if execute == EX_READ_ARRAY_HEADER: + if typ != TYPE_ARRAY: + raise UnpackValueError("Expected array") + return n + if execute == EX_READ_MAP_HEADER: + if typ != TYPE_MAP: + raise UnpackValueError("Expected map") + return n + # TODO should we eliminate the recursion? + if typ == TYPE_ARRAY: + if execute == EX_SKIP: + for i in xrange(n): + # TODO check whether we need to call `list_hook` + self._unpack(EX_SKIP) + return + ret = newlist_hint(n) + for i in xrange(n): + ret.append(self._unpack(EX_CONSTRUCT)) + if self._list_hook is not None: + ret = self._list_hook(ret) + # TODO is the interaction between `list_hook` and `use_list` ok? + return ret if self._use_list else tuple(ret) + if typ == TYPE_MAP: + if execute == EX_SKIP: + for i in xrange(n): + # TODO check whether we need to call hooks + self._unpack(EX_SKIP) + self._unpack(EX_SKIP) + return + if self._object_pairs_hook is not None: + ret = self._object_pairs_hook( + (self._unpack(EX_CONSTRUCT), + self._unpack(EX_CONSTRUCT)) + for _ in xrange(n)) + else: + ret = {} + for _ in xrange(n): + key = self._unpack(EX_CONSTRUCT) + ret[key] = self._unpack(EX_CONSTRUCT) + if self._object_hook is not None: + ret = self._object_hook(ret) + return ret + if execute == EX_SKIP: + return + if typ == TYPE_RAW: + if self._encoding is not None: + obj = obj.decode(self._encoding, self._unicode_errors) + elif self._raw: + obj = bytes(obj) + else: + obj = obj.decode('utf_8') + return obj + if typ == TYPE_EXT: + return self._ext_hook(n, bytes(obj)) + if typ == TYPE_BIN: + return bytes(obj) + assert typ == TYPE_IMMEDIATE + return obj + + def __iter__(self): + return self + + def __next__(self): + try: + ret = self._unpack(EX_CONSTRUCT) + self._consume() + return ret + except OutOfData: + self._consume() + raise StopIteration + + next = __next__ + + def 
skip(self, write_bytes=None): + self._unpack(EX_SKIP) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + + def unpack(self, write_bytes=None): + ret = self._unpack(EX_CONSTRUCT) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + return ret + + def read_array_header(self, write_bytes=None): + ret = self._unpack(EX_READ_ARRAY_HEADER) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + return ret + + def read_map_header(self, write_bytes=None): + ret = self._unpack(EX_READ_MAP_HEADER) + if write_bytes is not None: + warnings.warn("`write_bytes` option is deprecated. Use `.tell()` instead.", DeprecationWarning) + write_bytes(self._buffer[self._buf_checkpoint:self._buff_i]) + self._consume() + return ret + + def tell(self): + return self._stream_offset + + +class Packer(object): + """ + MessagePack Packer + + usage: + + packer = Packer() + astream.write(packer.pack(a)) + astream.write(packer.pack(b)) + + Packer's constructor has some keyword arguments: + + :param callable default: + Convert user type to builtin type that Packer supports. + See also simplejson's document. + + :param bool use_single_float: + Use single precision float type for float. (default: False) + + :param bool autoreset: + Reset buffer after each pack and return its content as `bytes`. (default: True). + If set this to false, use `bytes()` to get content and `.reset()` to clear buffer. + + :param bool use_bin_type: + Use bin type introduced in msgpack spec 2.0 for bytes. + It also enables str8 type for unicode. 
+ + :param bool strict_types: + If set to true, types will be checked to be exact. Derived classes + from serializeable types will not be serialized and will be + treated as unsupported type and forwarded to default. + Additionally tuples will not be serialized as lists. + This is useful when trying to implement accurate serialization + for python types. + + :param str encoding: + (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8') + + :param str unicode_errors: + Error handler for encoding unicode. (default: 'strict') + """ + def __init__(self, default=None, encoding=None, unicode_errors=None, + use_single_float=False, autoreset=True, use_bin_type=False, + strict_types=False): + if encoding is None: + encoding = 'utf_8' + else: + warnings.warn( + "encoding is deprecated, Use raw=False instead.", + PendingDeprecationWarning) + + if unicode_errors is None: + unicode_errors = 'strict' + + self._strict_types = strict_types + self._use_float = use_single_float + self._autoreset = autoreset + self._use_bin_type = use_bin_type + self._encoding = encoding + self._unicode_errors = unicode_errors + self._buffer = StringIO() + if default is not None: + if not callable(default): + raise TypeError("default must be callable") + self._default = default + + def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, + check=isinstance, check_type_strict=_check_type_strict): + default_used = False + if self._strict_types: + check = check_type_strict + list_types = list + else: + list_types = (list, tuple) + while True: + if nest_limit < 0: + raise PackValueError("recursion limit exceeded") + if obj is None: + return self._buffer.write(b"\xc0") + if check(obj, bool): + if obj: + return self._buffer.write(b"\xc3") + return self._buffer.write(b"\xc2") + if check(obj, int_types): + if 0 <= obj < 0x80: + return self._buffer.write(struct.pack("B", obj)) + if -0x20 <= obj < 0: + return self._buffer.write(struct.pack("b", obj)) + if 0x80 <= obj <= 0xff: + return 
self._buffer.write(struct.pack("BB", 0xcc, obj)) + if -0x80 <= obj < 0: + return self._buffer.write(struct.pack(">Bb", 0xd0, obj)) + if 0xff < obj <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xcd, obj)) + if -0x8000 <= obj < -0x80: + return self._buffer.write(struct.pack(">Bh", 0xd1, obj)) + if 0xffff < obj <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xce, obj)) + if -0x80000000 <= obj < -0x8000: + return self._buffer.write(struct.pack(">Bi", 0xd2, obj)) + if 0xffffffff < obj <= 0xffffffffffffffff: + return self._buffer.write(struct.pack(">BQ", 0xcf, obj)) + if -0x8000000000000000 <= obj < -0x80000000: + return self._buffer.write(struct.pack(">Bq", 0xd3, obj)) + if not default_used and self._default is not None: + obj = self._default(obj) + default_used = True + continue + raise PackOverflowError("Integer value out of range") + if check(obj, (bytes, bytearray)): + n = len(obj) + if n >= 2**32: + raise PackValueError("%s is too large" % type(obj).__name__) + self._pack_bin_header(n) + return self._buffer.write(obj) + if check(obj, Unicode): + if self._encoding is None: + raise TypeError( + "Can't encode unicode string: " + "no encoding is specified") + obj = obj.encode(self._encoding, self._unicode_errors) + n = len(obj) + if n >= 2**32: + raise PackValueError("String is too large") + self._pack_raw_header(n) + return self._buffer.write(obj) + if check(obj, memoryview): + n = len(obj) * obj.itemsize + if n >= 2**32: + raise PackValueError("Memoryview is too large") + self._pack_bin_header(n) + return self._buffer.write(obj) + if check(obj, float): + if self._use_float: + return self._buffer.write(struct.pack(">Bf", 0xca, obj)) + return self._buffer.write(struct.pack(">Bd", 0xcb, obj)) + if check(obj, ExtType): + code = obj.code + data = obj.data + assert isinstance(code, int) + assert isinstance(data, bytes) + L = len(data) + if L == 1: + self._buffer.write(b'\xd4') + elif L == 2: + self._buffer.write(b'\xd5') + elif L == 4: + 
self._buffer.write(b'\xd6') + elif L == 8: + self._buffer.write(b'\xd7') + elif L == 16: + self._buffer.write(b'\xd8') + elif L <= 0xff: + self._buffer.write(struct.pack(">BB", 0xc7, L)) + elif L <= 0xffff: + self._buffer.write(struct.pack(">BH", 0xc8, L)) + else: + self._buffer.write(struct.pack(">BI", 0xc9, L)) + self._buffer.write(struct.pack("b", code)) + self._buffer.write(data) + return + if check(obj, list_types): + n = len(obj) + self._pack_array_header(n) + for i in xrange(n): + self._pack(obj[i], nest_limit - 1) + return + if check(obj, dict): + return self._pack_map_pairs(len(obj), dict_iteritems(obj), + nest_limit - 1) + if not default_used and self._default is not None: + obj = self._default(obj) + default_used = 1 + continue + raise TypeError("Cannot serialize %r" % (obj, )) + + def pack(self, obj): + try: + self._pack(obj) + except: + self._buffer = StringIO() # force reset + raise + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_map_pairs(self, pairs): + self._pack_map_pairs(len(pairs), pairs) + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_array_header(self, n): + if n >= 2**32: + raise PackValueError + self._pack_array_header(n) + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_map_header(self, n): + if n >= 2**32: + raise PackValueError + self._pack_map_header(n) + ret = self._buffer.getvalue() + if self._autoreset: + self._buffer = StringIO() + elif USING_STRINGBUILDER: + self._buffer = StringIO(ret) + return ret + + def pack_ext_type(self, typecode, data): + if not isinstance(typecode, int): + raise TypeError("typecode must have int type.") + if not 0 <= typecode <= 127: + raise 
ValueError("typecode should be 0-127") + if not isinstance(data, bytes): + raise TypeError("data must have bytes type") + L = len(data) + if L > 0xffffffff: + raise PackValueError("Too large data") + if L == 1: + self._buffer.write(b'\xd4') + elif L == 2: + self._buffer.write(b'\xd5') + elif L == 4: + self._buffer.write(b'\xd6') + elif L == 8: + self._buffer.write(b'\xd7') + elif L == 16: + self._buffer.write(b'\xd8') + elif L <= 0xff: + self._buffer.write(b'\xc7' + struct.pack('B', L)) + elif L <= 0xffff: + self._buffer.write(b'\xc8' + struct.pack('>H', L)) + else: + self._buffer.write(b'\xc9' + struct.pack('>I', L)) + self._buffer.write(struct.pack('B', typecode)) + self._buffer.write(data) + + def _pack_array_header(self, n): + if n <= 0x0f: + return self._buffer.write(struct.pack('B', 0x90 + n)) + if n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xdc, n)) + if n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xdd, n)) + raise PackValueError("Array is too large") + + def _pack_map_header(self, n): + if n <= 0x0f: + return self._buffer.write(struct.pack('B', 0x80 + n)) + if n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xde, n)) + if n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xdf, n)) + raise PackValueError("Dict is too large") + + def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): + self._pack_map_header(n) + for (k, v) in pairs: + self._pack(k, nest_limit - 1) + self._pack(v, nest_limit - 1) + + def _pack_raw_header(self, n): + if n <= 0x1f: + self._buffer.write(struct.pack('B', 0xa0 + n)) + elif self._use_bin_type and n <= 0xff: + self._buffer.write(struct.pack('>BB', 0xd9, n)) + elif n <= 0xffff: + self._buffer.write(struct.pack(">BH", 0xda, n)) + elif n <= 0xffffffff: + self._buffer.write(struct.pack(">BI", 0xdb, n)) + else: + raise PackValueError('Raw is too large') + + def _pack_bin_header(self, n): + if not self._use_bin_type: + return self._pack_raw_header(n) + elif n <= 
0xff: + return self._buffer.write(struct.pack('>BB', 0xc4, n)) + elif n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xc5, n)) + elif n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xc6, n)) + else: + raise PackValueError('Bin is too large') + + def bytes(self): + return self._buffer.getvalue() + + def reset(self): + self._buffer = StringIO() diff --git a/src/borg/algorithms/msgpack/pack.h b/src/borg/algorithms/msgpack/pack.h new file mode 100644 index 0000000000..4f3ce1d99e --- /dev/null +++ b/src/borg/algorithms/msgpack/pack.h @@ -0,0 +1,119 @@ +/* + * MessagePack for Python packing routine + * + * Copyright (C) 2009 Naoki INADA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include "sysdep.h" +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#define inline __inline +#endif + +typedef struct msgpack_packer { + char *buf; + size_t length; + size_t buf_size; + bool use_bin_type; +} msgpack_packer; + +typedef struct Packer Packer; + +static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l) +{ + char* buf = pk->buf; + size_t bs = pk->buf_size; + size_t len = pk->length; + + if (len + l > bs) { + bs = (len + l) * 2; + buf = (char*)PyMem_Realloc(buf, bs); + if (!buf) { + PyErr_NoMemory(); + return -1; + } + } + memcpy(buf + len, data, l); + len += l; + + pk->buf = buf; + pk->buf_size = bs; + pk->length = len; + return 0; +} + +#define msgpack_pack_append_buffer(user, buf, len) \ + return msgpack_pack_write(user, (const char*)buf, len) + +#include "pack_template.h" + +// return -2 when o is too long +static inline int +msgpack_pack_unicode(msgpack_packer *pk, PyObject *o, long long limit) +{ +#if PY_MAJOR_VERSION >= 3 + assert(PyUnicode_Check(o)); + + Py_ssize_t len; + const char* buf = PyUnicode_AsUTF8AndSize(o, &len); + if (buf == NULL) + return -1; + + if (len > limit) { + return -2; + } + + int ret = msgpack_pack_raw(pk, len); + if (ret) return ret; + + return msgpack_pack_raw_body(pk, buf, len); +#else + PyObject *bytes; + Py_ssize_t len; + int ret; + + // py2 + bytes = PyUnicode_AsUTF8String(o); + if (bytes == NULL) + return -1; + + len = PyString_GET_SIZE(bytes); + if (len > limit) { + Py_DECREF(bytes); + return -2; + } + + ret = msgpack_pack_raw(pk, len); + if (ret) { + Py_DECREF(bytes); + return -1; + } + ret = msgpack_pack_raw_body(pk, PyString_AS_STRING(bytes), len); + Py_DECREF(bytes); + return ret; +#endif +} + +#ifdef __cplusplus +} +#endif diff --git a/src/borg/algorithms/msgpack/pack_template.h b/src/borg/algorithms/msgpack/pack_template.h new file mode 100644 index 0000000000..5d1088f4b7 --- /dev/null +++ 
b/src/borg/algorithms/msgpack/pack_template.h @@ -0,0 +1,785 @@ +/* + * MessagePack packing routine template + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined(__LITTLE_ENDIAN__) +#define TAKE8_8(d) ((uint8_t*)&d)[0] +#define TAKE8_16(d) ((uint8_t*)&d)[0] +#define TAKE8_32(d) ((uint8_t*)&d)[0] +#define TAKE8_64(d) ((uint8_t*)&d)[0] +#elif defined(__BIG_ENDIAN__) +#define TAKE8_8(d) ((uint8_t*)&d)[0] +#define TAKE8_16(d) ((uint8_t*)&d)[1] +#define TAKE8_32(d) ((uint8_t*)&d)[3] +#define TAKE8_64(d) ((uint8_t*)&d)[7] +#endif + +#ifndef msgpack_pack_append_buffer +#error msgpack_pack_append_buffer callback is not defined +#endif + + +/* + * Integer + */ + +#define msgpack_pack_real_uint8(x, d) \ +do { \ + if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \ + } else { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ +} while(0) + +#define msgpack_pack_real_uint16(x, d) \ +do { \ + if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \ + } else if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } \ +} while(0) + +#define 
msgpack_pack_real_uint32(x, d) \ +do { \ + if(d < (1<<8)) { \ + if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \ + } else { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else { \ + if(d < (1<<16)) { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_uint64(x, d) \ +do { \ + if(d < (1ULL<<8)) { \ + if(d < (1ULL<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \ + } else { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else { \ + if(d < (1ULL<<16)) { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else if(d < (1ULL<<32)) { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } else { \ + /* unsigned 64 */ \ + unsigned char buf[9]; \ + buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \ + msgpack_pack_append_buffer(x, buf, 9); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_int8(x, d) \ +do { \ + if(d < -(1<<5)) { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \ + } \ +} while(0) + +#define msgpack_pack_real_int16(x, d) \ +do { \ + if(d < -(1<<5)) { \ + if(d < -(1<<7)) { \ + /* signed 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ + 
msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \ + } else { \ + if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_int32(x, d) \ +do { \ + if(d < -(1<<5)) { \ + if(d < -(1<<15)) { \ + /* signed 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } else if(d < -(1<<7)) { \ + /* signed 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \ + } else { \ + if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else if(d < (1<<16)) { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_int64(x, d) \ +do { \ + if(d < -(1LL<<5)) { \ + if(d < -(1LL<<15)) { \ + if(d < -(1LL<<31)) { \ + /* signed 64 */ \ + unsigned char buf[9]; \ + buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \ + 
msgpack_pack_append_buffer(x, buf, 9); \ + } else { \ + /* signed 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } \ + } else { \ + if(d < -(1<<7)) { \ + /* signed 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } \ + } else if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \ + } else { \ + if(d < (1LL<<16)) { \ + if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } \ + } else { \ + if(d < (1LL<<32)) { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } else { \ + /* unsigned 64 */ \ + unsigned char buf[9]; \ + buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \ + msgpack_pack_append_buffer(x, buf, 9); \ + } \ + } \ + } \ +} while(0) + + +static inline int msgpack_pack_uint8(msgpack_packer* x, uint8_t d) +{ + msgpack_pack_real_uint8(x, d); +} + +static inline int msgpack_pack_uint16(msgpack_packer* x, uint16_t d) +{ + msgpack_pack_real_uint16(x, d); +} + +static inline int msgpack_pack_uint32(msgpack_packer* x, uint32_t d) +{ + msgpack_pack_real_uint32(x, d); +} + +static inline int msgpack_pack_uint64(msgpack_packer* x, uint64_t d) +{ + msgpack_pack_real_uint64(x, d); +} + +static inline int msgpack_pack_int8(msgpack_packer* x, int8_t d) +{ + msgpack_pack_real_int8(x, d); +} + +static inline int msgpack_pack_int16(msgpack_packer* x, int16_t d) +{ + msgpack_pack_real_int16(x, d); +} + 
+static inline int msgpack_pack_int32(msgpack_packer* x, int32_t d) +{ + msgpack_pack_real_int32(x, d); +} + +static inline int msgpack_pack_int64(msgpack_packer* x, int64_t d) +{ + msgpack_pack_real_int64(x, d); +} + + +//#ifdef msgpack_pack_inline_func_cint + +static inline int msgpack_pack_short(msgpack_packer* x, short d) +{ +#if defined(SIZEOF_SHORT) +#if SIZEOF_SHORT == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_SHORT == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(SHRT_MAX) +#if SHRT_MAX == 0x7fff + msgpack_pack_real_int16(x, d); +#elif SHRT_MAX == 0x7fffffff + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(short) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(short) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_int(msgpack_packer* x, int d) +{ +#if defined(SIZEOF_INT) +#if SIZEOF_INT == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_INT == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(INT_MAX) +#if INT_MAX == 0x7fff + msgpack_pack_real_int16(x, d); +#elif INT_MAX == 0x7fffffff + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(int) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(int) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_long(msgpack_packer* x, long d) +{ +#if defined(SIZEOF_LONG) +#if SIZEOF_LONG == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_LONG == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(LONG_MAX) +#if LONG_MAX == 0x7fffL + msgpack_pack_real_int16(x, d); +#elif LONG_MAX == 0x7fffffffL + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else 
+if(sizeof(long) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(long) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_long_long(msgpack_packer* x, long long d) +{ +#if defined(SIZEOF_LONG_LONG) +#if SIZEOF_LONG_LONG == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_LONG_LONG == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(LLONG_MAX) +#if LLONG_MAX == 0x7fffL + msgpack_pack_real_int16(x, d); +#elif LLONG_MAX == 0x7fffffffL + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(long long) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(long long) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_short(msgpack_packer* x, unsigned short d) +{ +#if defined(SIZEOF_SHORT) +#if SIZEOF_SHORT == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_SHORT == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(USHRT_MAX) +#if USHRT_MAX == 0xffffU + msgpack_pack_real_uint16(x, d); +#elif USHRT_MAX == 0xffffffffU + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned short) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned short) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_int(msgpack_packer* x, unsigned int d) +{ +#if defined(SIZEOF_INT) +#if SIZEOF_INT == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_INT == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(UINT_MAX) +#if UINT_MAX == 0xffffU + msgpack_pack_real_uint16(x, d); +#elif UINT_MAX == 0xffffffffU + msgpack_pack_real_uint32(x, d); +#else + 
msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned int) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned int) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_long(msgpack_packer* x, unsigned long d) +{ +#if defined(SIZEOF_LONG) +#if SIZEOF_LONG == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_LONG == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(ULONG_MAX) +#if ULONG_MAX == 0xffffUL + msgpack_pack_real_uint16(x, d); +#elif ULONG_MAX == 0xffffffffUL + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned long) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned long) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_long_long(msgpack_packer* x, unsigned long long d) +{ +#if defined(SIZEOF_LONG_LONG) +#if SIZEOF_LONG_LONG == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_LONG_LONG == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(ULLONG_MAX) +#if ULLONG_MAX == 0xffffUL + msgpack_pack_real_uint16(x, d); +#elif ULLONG_MAX == 0xffffffffUL + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned long long) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned long long) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +//#undef msgpack_pack_inline_func_cint +//#endif + + + +/* + * Float + */ + +static inline int msgpack_pack_float(msgpack_packer* x, float d) +{ + union { float f; uint32_t i; } mem; + mem.f = d; + unsigned char buf[5]; + buf[0] = 0xca; _msgpack_store32(&buf[1], mem.i); + msgpack_pack_append_buffer(x, buf, 5); +} + +static 
inline int msgpack_pack_double(msgpack_packer* x, double d) +{ + union { double f; uint64_t i; } mem; + mem.f = d; + unsigned char buf[9]; + buf[0] = 0xcb; +#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi + // https://github.com/msgpack/msgpack-perl/pull/1 + mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL); +#endif + _msgpack_store64(&buf[1], mem.i); + msgpack_pack_append_buffer(x, buf, 9); +} + + +/* + * Nil + */ + +static inline int msgpack_pack_nil(msgpack_packer* x) +{ + static const unsigned char d = 0xc0; + msgpack_pack_append_buffer(x, &d, 1); +} + + +/* + * Boolean + */ + +static inline int msgpack_pack_true(msgpack_packer* x) +{ + static const unsigned char d = 0xc3; + msgpack_pack_append_buffer(x, &d, 1); +} + +static inline int msgpack_pack_false(msgpack_packer* x) +{ + static const unsigned char d = 0xc2; + msgpack_pack_append_buffer(x, &d, 1); +} + + +/* + * Array + */ + +static inline int msgpack_pack_array(msgpack_packer* x, unsigned int n) +{ + if(n < 16) { + unsigned char d = 0x90 | n; + msgpack_pack_append_buffer(x, &d, 1); + } else if(n < 65536) { + unsigned char buf[3]; + buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5]; + buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n); + msgpack_pack_append_buffer(x, buf, 5); + } +} + + +/* + * Map + */ + +static inline int msgpack_pack_map(msgpack_packer* x, unsigned int n) +{ + if(n < 16) { + unsigned char d = 0x80 | n; + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); + } else if(n < 65536) { + unsigned char buf[3]; + buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5]; + buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n); + msgpack_pack_append_buffer(x, buf, 5); + } +} + + +/* + * Raw + */ + +static inline int msgpack_pack_raw(msgpack_packer* x, size_t l) +{ + if (l < 32) { + unsigned char d = 0xa0 | (uint8_t)l; + 
msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); + } else if (x->use_bin_type && l < 256) { // str8 is new format introduced with bin. + unsigned char buf[2] = {0xd9, (uint8_t)l}; + msgpack_pack_append_buffer(x, buf, 2); + } else if (l < 65536) { + unsigned char buf[3]; + buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5]; + buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l); + msgpack_pack_append_buffer(x, buf, 5); + } +} + +/* + * bin + */ +static inline int msgpack_pack_bin(msgpack_packer *x, size_t l) +{ + if (!x->use_bin_type) { + return msgpack_pack_raw(x, l); + } + if (l < 256) { + unsigned char buf[2] = {0xc4, (unsigned char)l}; + msgpack_pack_append_buffer(x, buf, 2); + } else if (l < 65536) { + unsigned char buf[3] = {0xc5}; + _msgpack_store16(&buf[1], (uint16_t)l); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5] = {0xc6}; + _msgpack_store32(&buf[1], (uint32_t)l); + msgpack_pack_append_buffer(x, buf, 5); + } +} + +static inline int msgpack_pack_raw_body(msgpack_packer* x, const void* b, size_t l) +{ + if (l > 0) msgpack_pack_append_buffer(x, (const unsigned char*)b, l); + return 0; +} + +/* + * Ext + */ +static inline int msgpack_pack_ext(msgpack_packer* x, char typecode, size_t l) +{ + if (l == 1) { + unsigned char buf[2]; + buf[0] = 0xd4; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 2) { + unsigned char buf[2]; + buf[0] = 0xd5; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 4) { + unsigned char buf[2]; + buf[0] = 0xd6; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 8) { + unsigned char buf[2]; + buf[0] = 0xd7; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 16) { + unsigned char buf[2]; + buf[0] = 0xd8; + buf[1] = (unsigned char)typecode; + 
msgpack_pack_append_buffer(x, buf, 2); + } + else if(l < 256) { + unsigned char buf[3]; + buf[0] = 0xc7; + buf[1] = l; + buf[2] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 3); + } else if(l < 65536) { + unsigned char buf[4]; + buf[0] = 0xc8; + _msgpack_store16(&buf[1], (uint16_t)l); + buf[3] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 4); + } else { + unsigned char buf[6]; + buf[0] = 0xc9; + _msgpack_store32(&buf[1], (uint32_t)l); + buf[5] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 6); + } + +} + + + +#undef msgpack_pack_append_buffer + +#undef TAKE8_8 +#undef TAKE8_16 +#undef TAKE8_32 +#undef TAKE8_64 + +#undef msgpack_pack_real_uint8 +#undef msgpack_pack_real_uint16 +#undef msgpack_pack_real_uint32 +#undef msgpack_pack_real_uint64 +#undef msgpack_pack_real_int8 +#undef msgpack_pack_real_int16 +#undef msgpack_pack_real_int32 +#undef msgpack_pack_real_int64 diff --git a/src/borg/algorithms/msgpack/sysdep.h b/src/borg/algorithms/msgpack/sysdep.h new file mode 100644 index 0000000000..ed9c1bc0b8 --- /dev/null +++ b/src/borg/algorithms/msgpack/sysdep.h @@ -0,0 +1,194 @@ +/* + * MessagePack system dependencies + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MSGPACK_SYSDEP_H__
+#define MSGPACK_SYSDEP_H__
+
+#include <stddef.h>
+#include <stdio.h>
+#if defined(_MSC_VER) && _MSC_VER < 1600
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#elif defined(_MSC_VER) // && _MSC_VER >= 1600
+#include <stdint.h>
+#else
+#include <stdint.h>
+#include <stdbool.h>
+#endif
+
+#ifdef _WIN32
+#define _msgpack_atomic_counter_header <windows.h>
+typedef long _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr)
+#define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr)
+#elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41)
+#define _msgpack_atomic_counter_header "gcc_atomic.h"
+#else
+typedef unsigned int _msgpack_atomic_counter_t;
+#define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1)
+#define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1)
+#endif
+
+#ifdef _WIN32
+
+#ifdef __cplusplus
+/* numeric_limits<T>::min,max */
+#ifdef max
+#undef max
+#endif
+#ifdef min
+#undef min
+#endif
+#endif
+
+#else
+#include <arpa/inet.h> /* __BYTE_ORDER */
+#endif
+
+#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN__
+#elif __BYTE_ORDER == __BIG_ENDIAN
+#define __BIG_ENDIAN__
+#elif _WIN32
+#define __LITTLE_ENDIAN__
+#endif
+#endif
+
+
+#ifdef __LITTLE_ENDIAN__
+
+#ifdef _WIN32
+# if defined(ntohs)
+# define _msgpack_be16(x) ntohs(x)
+# elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x))
+# else
+# define _msgpack_be16(x) ( \
+ ((((uint16_t)x) << 8) ) | \
+ ((((uint16_t)x) >> 8) ) )
+# endif
+#else
+# define _msgpack_be16(x) ntohs(x)
+#endif
+
+#ifdef _WIN32
+# if defined(ntohl)
+# define _msgpack_be32(x) ntohl(x)
+# elif defined(_byteswap_ulong) ||
(defined(_MSC_VER) && _MSC_VER >= 1400) +# define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x)) +# else +# define _msgpack_be32(x) \ + ( ((((uint32_t)x) << 24) ) | \ + ((((uint32_t)x) << 8) & 0x00ff0000U ) | \ + ((((uint32_t)x) >> 8) & 0x0000ff00U ) | \ + ((((uint32_t)x) >> 24) ) ) +# endif +#else +# define _msgpack_be32(x) ntohl(x) +#endif + +#if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400) +# define _msgpack_be64(x) (_byteswap_uint64(x)) +#elif defined(bswap_64) +# define _msgpack_be64(x) bswap_64(x) +#elif defined(__DARWIN_OSSwapInt64) +# define _msgpack_be64(x) __DARWIN_OSSwapInt64(x) +#else +#define _msgpack_be64(x) \ + ( ((((uint64_t)x) << 56) ) | \ + ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \ + ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \ + ((((uint64_t)x) << 8) & 0x000000ff00000000ULL ) | \ + ((((uint64_t)x) >> 8) & 0x00000000ff000000ULL ) | \ + ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \ + ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \ + ((((uint64_t)x) >> 56) ) ) +#endif + +#define _msgpack_load16(cast, from) ((cast)( \ + (((uint16_t)((uint8_t*)(from))[0]) << 8) | \ + (((uint16_t)((uint8_t*)(from))[1]) ) )) + +#define _msgpack_load32(cast, from) ((cast)( \ + (((uint32_t)((uint8_t*)(from))[0]) << 24) | \ + (((uint32_t)((uint8_t*)(from))[1]) << 16) | \ + (((uint32_t)((uint8_t*)(from))[2]) << 8) | \ + (((uint32_t)((uint8_t*)(from))[3]) ) )) + +#define _msgpack_load64(cast, from) ((cast)( \ + (((uint64_t)((uint8_t*)(from))[0]) << 56) | \ + (((uint64_t)((uint8_t*)(from))[1]) << 48) | \ + (((uint64_t)((uint8_t*)(from))[2]) << 40) | \ + (((uint64_t)((uint8_t*)(from))[3]) << 32) | \ + (((uint64_t)((uint8_t*)(from))[4]) << 24) | \ + (((uint64_t)((uint8_t*)(from))[5]) << 16) | \ + (((uint64_t)((uint8_t*)(from))[6]) << 8) | \ + (((uint64_t)((uint8_t*)(from))[7]) ) )) + +#else + +#define _msgpack_be16(x) (x) +#define _msgpack_be32(x) (x) +#define _msgpack_be64(x) (x) + +#define 
_msgpack_load16(cast, from) ((cast)( \ + (((uint16_t)((uint8_t*)from)[0]) << 8) | \ + (((uint16_t)((uint8_t*)from)[1]) ) )) + +#define _msgpack_load32(cast, from) ((cast)( \ + (((uint32_t)((uint8_t*)from)[0]) << 24) | \ + (((uint32_t)((uint8_t*)from)[1]) << 16) | \ + (((uint32_t)((uint8_t*)from)[2]) << 8) | \ + (((uint32_t)((uint8_t*)from)[3]) ) )) + +#define _msgpack_load64(cast, from) ((cast)( \ + (((uint64_t)((uint8_t*)from)[0]) << 56) | \ + (((uint64_t)((uint8_t*)from)[1]) << 48) | \ + (((uint64_t)((uint8_t*)from)[2]) << 40) | \ + (((uint64_t)((uint8_t*)from)[3]) << 32) | \ + (((uint64_t)((uint8_t*)from)[4]) << 24) | \ + (((uint64_t)((uint8_t*)from)[5]) << 16) | \ + (((uint64_t)((uint8_t*)from)[6]) << 8) | \ + (((uint64_t)((uint8_t*)from)[7]) ) )) +#endif + + +#define _msgpack_store16(to, num) \ + do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0) +#define _msgpack_store32(to, num) \ + do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0) +#define _msgpack_store64(to, num) \ + do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0) + +/* +#define _msgpack_load16(cast, from) \ + ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); }) +#define _msgpack_load32(cast, from) \ + ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); }) +#define _msgpack_load64(cast, from) \ + ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); }) +*/ + + +#endif /* msgpack/sysdep.h */ diff --git a/src/borg/algorithms/msgpack/unpack.h b/src/borg/algorithms/msgpack/unpack.h new file mode 100644 index 0000000000..63e5543b55 --- /dev/null +++ b/src/borg/algorithms/msgpack/unpack.h @@ -0,0 +1,282 @@ +/* + * MessagePack for Python unpacking routine + * + * Copyright (C) 2009 Naoki INADA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define MSGPACK_EMBED_STACK_SIZE (1024) +#include "unpack_define.h" + +typedef struct unpack_user { + bool use_list; + bool raw; + bool has_pairs_hook; + PyObject *object_hook; + PyObject *list_hook; + PyObject *ext_hook; + const char *encoding; + const char *unicode_errors; + Py_ssize_t max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len; +} unpack_user; + +typedef PyObject* msgpack_unpack_object; +struct unpack_context; +typedef struct unpack_context unpack_context; +typedef int (*execute_fn)(unpack_context *ctx, const char* data, Py_ssize_t len, Py_ssize_t* off); + +static inline msgpack_unpack_object unpack_callback_root(unpack_user* u) +{ + return NULL; +} + +static inline int unpack_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o) +{ + PyObject *p = PyInt_FromLong((long)d); + if (!p) + return -1; + *o = p; + return 0; +} +static inline int unpack_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o) +{ + return unpack_callback_uint16(u, d, o); +} + + +static inline int unpack_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o) +{ + PyObject *p = PyInt_FromSize_t((size_t)d); + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o) +{ + PyObject *p; + if (d > LONG_MAX) { + p = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)d); + } else { + p = PyInt_FromLong((long)d); + } + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_int32(unpack_user* u, 
int32_t d, msgpack_unpack_object* o) +{ + PyObject *p = PyInt_FromLong(d); + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o) +{ + return unpack_callback_int32(u, d, o); +} + +static inline int unpack_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o) +{ + return unpack_callback_int32(u, d, o); +} + +static inline int unpack_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o) +{ + PyObject *p; + if (d > LONG_MAX || d < LONG_MIN) { + p = PyLong_FromLongLong((PY_LONG_LONG)d); + } else { + p = PyInt_FromLong((long)d); + } + *o = p; + return 0; +} + +static inline int unpack_callback_double(unpack_user* u, double d, msgpack_unpack_object* o) +{ + PyObject *p = PyFloat_FromDouble(d); + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_float(unpack_user* u, float d, msgpack_unpack_object* o) +{ + return unpack_callback_double(u, d, o); +} + +static inline int unpack_callback_nil(unpack_user* u, msgpack_unpack_object* o) +{ Py_INCREF(Py_None); *o = Py_None; return 0; } + +static inline int unpack_callback_true(unpack_user* u, msgpack_unpack_object* o) +{ Py_INCREF(Py_True); *o = Py_True; return 0; } + +static inline int unpack_callback_false(unpack_user* u, msgpack_unpack_object* o) +{ Py_INCREF(Py_False); *o = Py_False; return 0; } + +static inline int unpack_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o) +{ + if (n > u->max_array_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_array_len(%zd)", n, u->max_array_len); + return -1; + } + PyObject *p = u->use_list ? 
PyList_New(n) : PyTuple_New(n); + + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o) +{ + if (u->use_list) + PyList_SET_ITEM(*c, current, o); + else + PyTuple_SET_ITEM(*c, current, o); + return 0; +} + +static inline int unpack_callback_array_end(unpack_user* u, msgpack_unpack_object* c) +{ + if (u->list_hook) { + PyObject *new_c = PyObject_CallFunctionObjArgs(u->list_hook, *c, NULL); + if (!new_c) + return -1; + Py_DECREF(*c); + *c = new_c; + } + return 0; +} + +static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o) +{ + if (n > u->max_map_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_map_len(%zd)", n, u->max_map_len); + return -1; + } + PyObject *p; + if (u->has_pairs_hook) { + p = PyList_New(n); // Or use tuple? + } + else { + p = PyDict_New(); + } + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v) +{ + if (u->has_pairs_hook) { + msgpack_unpack_object item = PyTuple_Pack(2, k, v); + if (!item) + return -1; + Py_DECREF(k); + Py_DECREF(v); + PyList_SET_ITEM(*c, current, item); + return 0; + } + else if (PyDict_SetItem(*c, k, v) == 0) { + Py_DECREF(k); + Py_DECREF(v); + return 0; + } + return -1; +} + +static inline int unpack_callback_map_end(unpack_user* u, msgpack_unpack_object* c) +{ + if (u->object_hook) { + PyObject *new_c = PyObject_CallFunctionObjArgs(u->object_hook, *c, NULL); + if (!new_c) + return -1; + + Py_DECREF(*c); + *c = new_c; + } + return 0; +} + +static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o) +{ + if (l > u->max_str_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_str_len(%zd)", l, u->max_str_len); + return -1; + } + + 
PyObject *py; + + if (u->encoding) { + py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors); + } else if (u->raw) { + py = PyBytes_FromStringAndSize(p, l); + } else { + py = PyUnicode_DecodeUTF8(p, l, u->unicode_errors); + } + if (!py) + return -1; + *o = py; + return 0; +} + +static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o) +{ + if (l > u->max_bin_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_bin_len(%zd)", l, u->max_bin_len); + return -1; + } + + PyObject *py = PyBytes_FromStringAndSize(p, l); + if (!py) + return -1; + *o = py; + return 0; +} + +static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos, + unsigned int length, msgpack_unpack_object* o) +{ + PyObject *py; + int8_t typecode = (int8_t)*pos++; + if (!u->ext_hook) { + PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL"); + return -1; + } + if (length-1 > u->max_ext_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_ext_len(%zd)", length, u->max_ext_len); + return -1; + } + // length also includes the typecode, so the actual data is length-1 +#if PY_MAJOR_VERSION == 2 + py = PyObject_CallFunction(u->ext_hook, "(is#)", (int)typecode, pos, (Py_ssize_t)length-1); +#else + py = PyObject_CallFunction(u->ext_hook, "(iy#)", (int)typecode, pos, (Py_ssize_t)length-1); +#endif + if (!py) + return -1; + *o = py; + return 0; +} + +#include "unpack_template.h" diff --git a/src/borg/algorithms/msgpack/unpack_define.h b/src/borg/algorithms/msgpack/unpack_define.h new file mode 100644 index 0000000000..d681277bb8 --- /dev/null +++ b/src/borg/algorithms/msgpack/unpack_define.h @@ -0,0 +1,95 @@ +/* + * MessagePack unpacking routine template + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MSGPACK_UNPACK_DEFINE_H__
+#define MSGPACK_UNPACK_DEFINE_H__
+
+#include "sysdep.h"
+#include <limits.h>
+#include <string.h>
+#include <assert.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef MSGPACK_EMBED_STACK_SIZE
+#define MSGPACK_EMBED_STACK_SIZE 32
+#endif
+
+
+// CS is first byte & 0x1f
+typedef enum {
+ CS_HEADER = 0x00, // nil
+
+ //CS_ = 0x01,
+ //CS_ = 0x02, // false
+ //CS_ = 0x03, // true
+
+ CS_BIN_8 = 0x04,
+ CS_BIN_16 = 0x05,
+ CS_BIN_32 = 0x06,
+
+ CS_EXT_8 = 0x07,
+ CS_EXT_16 = 0x08,
+ CS_EXT_32 = 0x09,
+
+ CS_FLOAT = 0x0a,
+ CS_DOUBLE = 0x0b,
+ CS_UINT_8 = 0x0c,
+ CS_UINT_16 = 0x0d,
+ CS_UINT_32 = 0x0e,
+ CS_UINT_64 = 0x0f,
+ CS_INT_8 = 0x10,
+ CS_INT_16 = 0x11,
+ CS_INT_32 = 0x12,
+ CS_INT_64 = 0x13,
+
+ //CS_FIXEXT1 = 0x14,
+ //CS_FIXEXT2 = 0x15,
+ //CS_FIXEXT4 = 0x16,
+ //CS_FIXEXT8 = 0x17,
+ //CS_FIXEXT16 = 0x18,
+
+ CS_RAW_8 = 0x19,
+ CS_RAW_16 = 0x1a,
+ CS_RAW_32 = 0x1b,
+ CS_ARRAY_16 = 0x1c,
+ CS_ARRAY_32 = 0x1d,
+ CS_MAP_16 = 0x1e,
+ CS_MAP_32 = 0x1f,
+
+ ACS_RAW_VALUE,
+ ACS_BIN_VALUE,
+ ACS_EXT_VALUE,
+} msgpack_unpack_state;
+
+
+typedef enum {
+ CT_ARRAY_ITEM,
+ CT_MAP_KEY,
+ CT_MAP_VALUE,
+} msgpack_container_type;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* msgpack/unpack_define.h */
diff --git a/src/borg/algorithms/msgpack/unpack_template.h b/src/borg/algorithms/msgpack/unpack_template.h
new file mode 100644
index 0000000000..525dea2471
--- /dev/null
+++ b/src/borg/algorithms/msgpack/unpack_template.h
@@ -0,0 +1,479 @@
+/*
+ * MessagePack unpacking routine template
+ *
+ * Copyright (C) 2008-2010 FURUHASHI
Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef USE_CASE_RANGE +#if !defined(_MSC_VER) +#define USE_CASE_RANGE +#endif +#endif + +typedef struct unpack_stack { + PyObject* obj; + Py_ssize_t size; + Py_ssize_t count; + unsigned int ct; + PyObject* map_key; +} unpack_stack; + +struct unpack_context { + unpack_user user; + unsigned int cs; + unsigned int trail; + unsigned int top; + /* + unpack_stack* stack; + unsigned int stack_size; + unpack_stack embed_stack[MSGPACK_EMBED_STACK_SIZE]; + */ + unpack_stack stack[MSGPACK_EMBED_STACK_SIZE]; +}; + + +static inline void unpack_init(unpack_context* ctx) +{ + ctx->cs = CS_HEADER; + ctx->trail = 0; + ctx->top = 0; + /* + ctx->stack = ctx->embed_stack; + ctx->stack_size = MSGPACK_EMBED_STACK_SIZE; + */ + ctx->stack[0].obj = unpack_callback_root(&ctx->user); +} + +/* +static inline void unpack_destroy(unpack_context* ctx) +{ + if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) { + free(ctx->stack); + } +} +*/ + +static inline PyObject* unpack_data(unpack_context* ctx) +{ + return (ctx)->stack[0].obj; +} + +static inline void unpack_clear(unpack_context *ctx) +{ + Py_CLEAR(ctx->stack[0].obj); +} + +template +static inline int unpack_execute(unpack_context* ctx, const char* data, Py_ssize_t len, Py_ssize_t* off) +{ + assert(len >= *off); + + const unsigned char* p = (unsigned char*)data + *off; + const unsigned char* const pe = (unsigned char*)data + len; + const void* n = p; + + unsigned int trail 
= ctx->trail; + unsigned int cs = ctx->cs; + unsigned int top = ctx->top; + unpack_stack* stack = ctx->stack; + /* + unsigned int stack_size = ctx->stack_size; + */ + unpack_user* user = &ctx->user; + + PyObject* obj = NULL; + unpack_stack* c = NULL; + + int ret; + +#define construct_cb(name) \ + construct && unpack_callback ## name + +#define push_simple_value(func) \ + if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \ + goto _push +#define push_fixed_value(func, arg) \ + if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \ + goto _push +#define push_variable_value(func, base, pos, len) \ + if(construct_cb(func)(user, \ + (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \ + goto _push + +#define again_fixed_trail(_cs, trail_len) \ + trail = trail_len; \ + cs = _cs; \ + goto _fixed_trail_again +#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \ + trail = trail_len; \ + if(trail == 0) { goto ifzero; } \ + cs = _cs; \ + goto _fixed_trail_again + +#define start_container(func, count_, ct_) \ + if(top >= MSGPACK_EMBED_STACK_SIZE) { goto _failed; } /* FIXME */ \ + if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \ + if((count_) == 0) { obj = stack[top].obj; \ + if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \ + goto _push; } \ + stack[top].ct = ct_; \ + stack[top].size = count_; \ + stack[top].count = 0; \ + ++top; \ + /*printf("container %d count %d stack %d\n",stack[top].obj,count_,top);*/ \ + /*printf("stack push %d\n", top);*/ \ + /* FIXME \ + if(top >= stack_size) { \ + if(stack_size == MSGPACK_EMBED_STACK_SIZE) { \ + size_t csize = sizeof(unpack_stack) * MSGPACK_EMBED_STACK_SIZE; \ + size_t nsize = csize * 2; \ + unpack_stack* tmp = (unpack_stack*)malloc(nsize); \ + if(tmp == NULL) { goto _failed; } \ + memcpy(tmp, ctx->stack, csize); \ + ctx->stack = stack = tmp; \ + ctx->stack_size = stack_size = MSGPACK_EMBED_STACK_SIZE * 2; \ + } else { \ + size_t nsize = 
sizeof(unpack_stack) * ctx->stack_size * 2; \ + unpack_stack* tmp = (unpack_stack*)realloc(ctx->stack, nsize); \ + if(tmp == NULL) { goto _failed; } \ + ctx->stack = stack = tmp; \ + ctx->stack_size = stack_size = stack_size * 2; \ + } \ + } \ + */ \ + goto _header_again + +#define NEXT_CS(p) ((unsigned int)*p & 0x1f) + +#ifdef USE_CASE_RANGE +#define SWITCH_RANGE_BEGIN switch(*p) { +#define SWITCH_RANGE(FROM, TO) case FROM ... TO: +#define SWITCH_RANGE_DEFAULT default: +#define SWITCH_RANGE_END } +#else +#define SWITCH_RANGE_BEGIN { if(0) { +#define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) { +#define SWITCH_RANGE_DEFAULT } else { +#define SWITCH_RANGE_END } } +#endif + + if(p == pe) { goto _out; } + do { + switch(cs) { + case CS_HEADER: + SWITCH_RANGE_BEGIN + SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum + push_fixed_value(_uint8, *(uint8_t*)p); + SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum + push_fixed_value(_int8, *(int8_t*)p); + SWITCH_RANGE(0xc0, 0xdf) // Variable + switch(*p) { + case 0xc0: // nil + push_simple_value(_nil); + //case 0xc1: // never used + case 0xc2: // false + push_simple_value(_false); + case 0xc3: // true + push_simple_value(_true); + case 0xc4: // bin 8 + again_fixed_trail(NEXT_CS(p), 1); + case 0xc5: // bin 16 + again_fixed_trail(NEXT_CS(p), 2); + case 0xc6: // bin 32 + again_fixed_trail(NEXT_CS(p), 4); + case 0xc7: // ext 8 + again_fixed_trail(NEXT_CS(p), 1); + case 0xc8: // ext 16 + again_fixed_trail(NEXT_CS(p), 2); + case 0xc9: // ext 32 + again_fixed_trail(NEXT_CS(p), 4); + case 0xca: // float + case 0xcb: // double + case 0xcc: // unsigned int 8 + case 0xcd: // unsigned int 16 + case 0xce: // unsigned int 32 + case 0xcf: // unsigned int 64 + case 0xd0: // signed int 8 + case 0xd1: // signed int 16 + case 0xd2: // signed int 32 + case 0xd3: // signed int 64 + again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03)); + case 0xd4: // fixext 1 + case 0xd5: // fixext 2 + case 0xd6: // fixext 4 + case 0xd7: // fixext 8 
+ again_fixed_trail_if_zero(ACS_EXT_VALUE, + (1 << (((unsigned int)*p) & 0x03))+1, + _ext_zero); + case 0xd8: // fixext 16 + again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero); + case 0xd9: // str 8 + again_fixed_trail(NEXT_CS(p), 1); + case 0xda: // raw 16 + case 0xdb: // raw 32 + case 0xdc: // array 16 + case 0xdd: // array 32 + case 0xde: // map 16 + case 0xdf: // map 32 + again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01)); + default: + goto _failed; + } + SWITCH_RANGE(0xa0, 0xbf) // FixRaw + again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero); + SWITCH_RANGE(0x90, 0x9f) // FixArray + start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM); + SWITCH_RANGE(0x80, 0x8f) // FixMap + start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY); + + SWITCH_RANGE_DEFAULT + goto _failed; + SWITCH_RANGE_END + // end CS_HEADER + + + _fixed_trail_again: + ++p; + + default: + if((size_t)(pe - p) < trail) { goto _out; } + n = p; p += trail - 1; + switch(cs) { + case CS_EXT_8: + again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero); + case CS_EXT_16: + again_fixed_trail_if_zero(ACS_EXT_VALUE, + _msgpack_load16(uint16_t,n)+1, + _ext_zero); + case CS_EXT_32: + again_fixed_trail_if_zero(ACS_EXT_VALUE, + _msgpack_load32(uint32_t,n)+1, + _ext_zero); + case CS_FLOAT: { + union { uint32_t i; float f; } mem; + mem.i = _msgpack_load32(uint32_t,n); + push_fixed_value(_float, mem.f); } + case CS_DOUBLE: { + union { uint64_t i; double f; } mem; + mem.i = _msgpack_load64(uint64_t,n); +#if defined(__arm__) && !(__ARM_EABI__) // arm-oabi + // https://github.com/msgpack/msgpack-perl/pull/1 + mem.i = (mem.i & 0xFFFFFFFFUL) << 32UL | (mem.i >> 32UL); +#endif + push_fixed_value(_double, mem.f); } + case CS_UINT_8: + push_fixed_value(_uint8, *(uint8_t*)n); + case CS_UINT_16: + push_fixed_value(_uint16, _msgpack_load16(uint16_t,n)); + case CS_UINT_32: + push_fixed_value(_uint32, _msgpack_load32(uint32_t,n)); + case CS_UINT_64: 
+ push_fixed_value(_uint64, _msgpack_load64(uint64_t,n)); + + case CS_INT_8: + push_fixed_value(_int8, *(int8_t*)n); + case CS_INT_16: + push_fixed_value(_int16, _msgpack_load16(int16_t,n)); + case CS_INT_32: + push_fixed_value(_int32, _msgpack_load32(int32_t,n)); + case CS_INT_64: + push_fixed_value(_int64, _msgpack_load64(int64_t,n)); + + case CS_BIN_8: + again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero); + case CS_BIN_16: + again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero); + case CS_BIN_32: + again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero); + case ACS_BIN_VALUE: + _bin_zero: + push_variable_value(_bin, data, n, trail); + + case CS_RAW_8: + again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero); + case CS_RAW_16: + again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero); + case CS_RAW_32: + again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero); + case ACS_RAW_VALUE: + _raw_zero: + push_variable_value(_raw, data, n, trail); + + case ACS_EXT_VALUE: + _ext_zero: + push_variable_value(_ext, data, n, trail); + + case CS_ARRAY_16: + start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM); + case CS_ARRAY_32: + /* FIXME security guard */ + start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM); + + case CS_MAP_16: + start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY); + case CS_MAP_32: + /* FIXME security guard */ + start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY); + + default: + goto _failed; + } + } + +_push: + if(top == 0) { goto _finish; } + c = &stack[top-1]; + switch(c->ct) { + case CT_ARRAY_ITEM: + if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; } + if(++c->count == c->size) { + obj = c->obj; + if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; } + --top; + /*printf("stack pop %d\n", top);*/ + goto _push; + } + goto _header_again; 
+ case CT_MAP_KEY: + c->map_key = obj; + c->ct = CT_MAP_VALUE; + goto _header_again; + case CT_MAP_VALUE: + if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; } + if(++c->count == c->size) { + obj = c->obj; + if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; } + --top; + /*printf("stack pop %d\n", top);*/ + goto _push; + } + c->ct = CT_MAP_KEY; + goto _header_again; + + default: + goto _failed; + } + +_header_again: + cs = CS_HEADER; + ++p; + } while(p != pe); + goto _out; + + +_finish: + if (!construct) + unpack_callback_nil(user, &obj); + stack[0].obj = obj; + ++p; + ret = 1; + /*printf("-- finish --\n"); */ + goto _end; + +_failed: + /*printf("** FAILED **\n"); */ + ret = -1; + goto _end; + +_out: + ret = 0; + goto _end; + +_end: + ctx->cs = cs; + ctx->trail = trail; + ctx->top = top; + *off = p - (const unsigned char*)data; + + return ret; +#undef construct_cb +} + +#undef SWITCH_RANGE_BEGIN +#undef SWITCH_RANGE +#undef SWITCH_RANGE_DEFAULT +#undef SWITCH_RANGE_END +#undef push_simple_value +#undef push_fixed_value +#undef push_variable_value +#undef again_fixed_trail +#undef again_fixed_trail_if_zero +#undef start_container + +template +static inline int unpack_container_header(unpack_context* ctx, const char* data, Py_ssize_t len, Py_ssize_t* off) +{ + assert(len >= *off); + uint32_t size; + const unsigned char *const p = (unsigned char*)data + *off; + +#define inc_offset(inc) \ + if (len - *off < inc) \ + return 0; \ + *off += inc; + + switch (*p) { + case var_offset: + inc_offset(3); + size = _msgpack_load16(uint16_t, p + 1); + break; + case var_offset + 1: + inc_offset(5); + size = _msgpack_load32(uint32_t, p + 1); + break; +#ifdef USE_CASE_RANGE + case fixed_offset + 0x0 ... 
fixed_offset + 0xf: +#else + case fixed_offset + 0x0: + case fixed_offset + 0x1: + case fixed_offset + 0x2: + case fixed_offset + 0x3: + case fixed_offset + 0x4: + case fixed_offset + 0x5: + case fixed_offset + 0x6: + case fixed_offset + 0x7: + case fixed_offset + 0x8: + case fixed_offset + 0x9: + case fixed_offset + 0xa: + case fixed_offset + 0xb: + case fixed_offset + 0xc: + case fixed_offset + 0xd: + case fixed_offset + 0xe: + case fixed_offset + 0xf: +#endif + ++*off; + size = ((unsigned int)*p) & 0x0f; + break; + default: + PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream"); + return -1; + } + unpack_callback_uint32(&ctx->user, size, &ctx->stack[0].obj); + return 1; +} + +#undef SWITCH_RANGE_BEGIN +#undef SWITCH_RANGE +#undef SWITCH_RANGE_DEFAULT +#undef SWITCH_RANGE_END + +static const execute_fn unpack_construct = &unpack_execute; +static const execute_fn unpack_skip = &unpack_execute; +static const execute_fn read_array_header = &unpack_container_header<0x90, 0xdc>; +static const execute_fn read_map_header = &unpack_container_header<0x80, 0xde>; + +#undef NEXT_CS + +/* vim: set ts=4 sw=4 sts=4 expandtab */ diff --git a/src/borg/algorithms/xxh64/xxhash.c b/src/borg/algorithms/xxh64/xxhash.c index 0d0b3a5298..0fae88c5d6 100644 --- a/src/borg/algorithms/xxh64/xxhash.c +++ b/src/borg/algorithms/xxh64/xxhash.c @@ -1,615 +1,43 @@ /* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. 
-* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - - -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. 
- * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2020 Yann Collet + * + * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * You can contact the author at: + * - xxHash homepage: https://www.xxhash.com + * - xxHash source repository: https://github.com/Cyan4973/xxHash */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) -# define XXH_FORCE_MEMORY_ACCESS 2 -# elif defined(__INTEL_COMPILER) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. - * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. - * By default, this option is disabled. To enable it, uncomment below define : - */ -/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ -/*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independant Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independance be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. - */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 -#endif - -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. 
- * The check costs one initial branch per hash; set to 0 when the input data - * is guaranteed to be aligned. +/* + * xxhash.c instantiates functions defined in xxhash.h */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - -/* ************************************* -* Includes & Memory related functions -***************************************/ -/* Modify the local functions below should you wish to use some other memory routines */ -/* for malloc(), free() */ -#include -static void* XXH_malloc(size_t s) { return malloc(s); } -static void XXH_free (void* p) { free(p); } -/* for memcpy() */ -#include -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } +#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ +#define XXH_IMPLEMENTATION /* access definitions */ -#define XXH_STATIC_LINKING_ONLY #include "xxhash.h" - - -/* ************************************* -* Compiler Specific Options -***************************************/ -#ifdef _MSC_VER /* Visual Studio */ -# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ -# define FORCE_INLINE static __forceinline -#else -# if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ -# ifdef __GNUC__ -# define FORCE_INLINE static inline __attribute__((always_inline)) -# else -# define FORCE_INLINE static inline -# endif -# else -# define FORCE_INLINE static -# endif /* __STDC_VERSION__ */ -#endif - - -/* ************************************* -* Basic Types -***************************************/ -#ifndef MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint8_t BYTE; - typedef uint16_t U16; - typedef uint32_t U32; - 
typedef int32_t S32; -# else - typedef unsigned char BYTE; - typedef unsigned short U16; - typedef unsigned int U32; - typedef signed int S32; -# endif -#endif - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; } __attribute__((packed)) unalign; -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } - -#else - -/* portable and safe solution. Generally efficient. - * see : http://stackoverflow.com/a/32095106/646947 - */ -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ -#if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) -#endif - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -#elif GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -#else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -#endif - - -/* ************************************* -* Architecture Macros 
-***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; - -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ -#ifndef XXH_CPU_LITTLE_ENDIAN - static const int g_one = 1; -# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) -#endif - - -/* *************************** -* Memory reads -*****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; - -FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); -} - -FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); -} - -/* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - -#ifndef XXH_NO_LONG_LONG - -/* ******************************************************************* -* 64-bits hash functions -*********************************************************************/ - -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) - -/*====== Memory access ======*/ - -#ifndef MEM_MODULE -# define MEM_MODULE -# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) -# include - typedef uint64_t U64; -# else - typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. 
*/ -# endif -#endif - - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign64; - -static U64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; } - -#else - -/* portable and safe solution. Generally efficient. - * see : http://stackoverflow.com/a/32095106/646947 - */ - -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap64 _byteswap_uint64 -#elif GCC_VERSION >= 403 -# define XXH_swap64 __builtin_bswap64 -#else -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - -FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); -} - -FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); -} - -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} - - -/*====== xxh64 ======*/ - -static const U64 PRIME64_1 = 11400714785074694791ULL; -static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; -} - -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; -} - -FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - U64 h64; -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } -#endif - - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; 
- p+=4; - } - - while (p> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - - -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_CREATESTATE_STATIC(state); - XXH64_reset(state, seed); - XXH64_update(state, input, len); - return XXH64_digest(state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - -/*====== Hash Streaming ======*/ - -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) -{ - memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - 
PRIME64_1; - memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - -FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - state->memsize += (U32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - 
return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} - -FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - const BYTE * p = (const BYTE*)state->mem64; - const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 + PRIME64_5; - } - - h64 += (U64) state->total_len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); -} - - -/*====== Canonical representation ======*/ - -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} - -#endif /* XXH_NO_LONG_LONG */ diff --git 
a/src/borg/algorithms/xxh64/xxhash.h b/src/borg/algorithms/xxh64/xxhash.h index 5e5d4cf470..f8c05f9b1b 100644 --- a/src/borg/algorithms/xxh64/xxhash.h +++ b/src/borg/algorithms/xxh64/xxhash.h @@ -1,40 +1,42 @@ /* - xxHash - Extremely Fast Hash algorithm - Header File - Copyright (C) 2012-2016, Yann Collet. - - BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - You can contact the author at : - - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (C) 2012-2020 Yann Collet + * + * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * You can contact the author at: + * - xxHash homepage: https://www.xxhash.com + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ -/* Notice extracted from xxHash homepage : +/* TODO: update */ +/* Notice extracted from xxHash homepage: -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. 
+xxHash is an extremely fast hash algorithm, running at RAM speed limits. It also successfully passes all tests from the SMHasher suite. Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) @@ -57,54 +59,50 @@ Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. -A 64-bits version, named XXH64, is available since r35. -It offers much better speed, but for 64-bits applications only. +Note: SMHasher's CRC32 implementation is not the fastest one. +Other speed-oriented implementations can be faster, +especially in combination with PCLMUL instruction: +https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735 + +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. Name Speed on 64 bits Speed on 32 bits XXH64 13.8 GB/s 1.9 GB/s XXH32 6.8 GB/s 6.0 GB/s */ -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - -#define XXH_STATIC_LINKING_ONLY - #if defined (__cplusplus) extern "C" { #endif - /* **************************** -* Compiler specifics -******************************/ -#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ -# define restrict /* disable restrict */ -#endif - - -/* **************************** -* Definitions -******************************/ -#include /* size_t */ -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; - - -/* **************************** -* API modifier -******************************/ -/** XXH_PRIVATE_API -* This is useful to include xxhash functions in `static` mode -* in order to inline them, and remove their symbol from the public list. -* Methodology : -* #define XXH_PRIVATE_API -* #include "xxhash.h" -* `xxhash.c` is automatically included. -* It's not useful to compile and link it as a separate module. 
-*/ -#ifdef XXH_PRIVATE_API -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif + * INLINE mode + ******************************/ +/*! + * XXH_INLINE_ALL (and XXH_PRIVATE_API) + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length is + * expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * + * Do not compile and link xxhash.o as a separate object, as it is not useful. + */ +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) @@ -112,23 +110,87 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else -# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static # endif -#else -# define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_PRIVATE_API */ -/*!XXH_NAMESPACE, aka Namespace Emulation : + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without 
XXH_INLINE_ALL, such + * as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ +# ifdef XXH_NAMESPACE +# error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported" + /* + * Note: Alternative: #undef all symbols (it's a pretty large list). + * Without #error: it compiles, but functions are actually not inlined. + */ +# endif +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, but they must + * still be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and is a more dispersed action. + * Meanwhile, renaming can be achieved in a single block + */ +# define XXH_IPREF(Id) XXH_INLINE_ ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ -If you want to include _and expose_ xxHash functions from within your own library, -but also want to avoid symbol collisions with other libraries which may also include xxHash, 
-you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library -with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values). -Note that no change is required within the calling program as long as it includes `xxhash.h` : -regular symbol name will be automatically translated by this header. -*/ +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + +/* specific declaration modes for Windows */ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif + +/*! + * XXH_NAMESPACE, aka Namespace Emulation: + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. + */ #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) @@ -158,88 +220,1847 @@ regular symbol name will be automatically translated by this header. 
* Version ***************************************/ #define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_MINOR 7 +#define XXH_VERSION_RELEASE 4 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) XXH_PUBLIC_API unsigned XXH_versionNumber (void); + +/* **************************** +* Definitions +******************************/ +#include /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/*-********************************************************************** +* 32-bit hash +************************************************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint32_t XXH32_hash_t; +#else +# include +# if UINT_MAX == 0xFFFFFFFFUL + typedef unsigned int XXH32_hash_t; +# else +# if ULONG_MAX == 0xFFFFFFFFUL + typedef unsigned long XXH32_hash_t; +# else +# error "unsupported platform: need a 32-bit type" +# endif +# endif +#endif + +/*! + * XXH32(): + * Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input". + * The memory between input & input+length must be valid (allocated and read-accessible). + * "seed" can be used to alter the result predictably. + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems, + * and offers true 64/128 bit hash results. It provides a superior level of + * dispersion, and greatly reduces the risks of collisions. + */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); + +/******* Streaming *******/ + +/* + * Streaming functions generate the xxHash value from an incrememtal input. + * This method is slower than single-call functions, due to state management. 
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + */ + +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +/******* Canonical representation *******/ + +/* + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * This the simplest and fastest format for further post-processing. + * + * However, this leaves open the question of what is the order on the byte level, + * since little and big endian conventions will store the same number differently. + * + * The canonical representation settles this issue by mandating big-endian + * convention, the same convention as human-readable numbers (large digits first). 
+ * + * When writing hash values to storage, sending them over a network, or printing + * them, it's highly recommended to use the canonical representation to ensure + * portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values to and from + * canonical format. + */ + +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); + + #ifndef XXH_NO_LONG_LONG /*-********************************************************************** -* 64-bits hash +* 64-bit hash ************************************************************************/ -typedef unsigned long long XXH64_hash_t; +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint64_t XXH64_hash_t; +#else + /* the following type must have a width of 64-bit */ + typedef unsigned long long XXH64_hash_t; +#endif -/*! XXH64() : - Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". - "seed" can be used to alter the result predictably. - This function runs faster on 64-bits systems, but slower on 32-bits systems (see benchmark). -*/ -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); +/*! + * XXH64(): + * Returns the 64-bit hash of sequence of length @length stored at memory + * address @input. + * @seed can be used to alter the result predictably. + * + * This function usually runs faster on 64-bit systems, but slower on 32-bit + * systems (see benchmark). + * + * Note: XXH3 provides competitive speed for both 32-bit and 64-bit systems, + * and offers true 64/128 bit hash results. It provides a superior level of + * dispersion, and greatly reduces the risks of collisions. 
+ */ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed); -/*====== Streaming ======*/ +/******* Streaming *******/ typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed); XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); -/*====== Canonical representation ======*/ -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +/******* Canonical representation *******/ +typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t; XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); + + #endif /* XXH_NO_LONG_LONG */ +#endif /* XXHASH_H_5627135585666179 */ + + + +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) +#define XXHASH_H_STATIC_13879238742 +/* **************************************************************************** + * This section contains declarations which are not guaranteed to remain stable. + * They may change in future versions, becoming incompatible with a different + * version of the library. + * These declarations should only be used with static linking. + * Never use them in association with dynamic linking! 
+ ***************************************************************************** */ + +/* + * These definitions are only present to allow static allocation of an XXH + * state, for example, on the stack or in a struct. + * Never **ever** access members directly. + */ + +struct XXH32_state_s { + XXH32_hash_t total_len_32; + XXH32_hash_t large_len; + XXH32_hash_t v1; + XXH32_hash_t v2; + XXH32_hash_t v3; + XXH32_hash_t v4; + XXH32_hash_t mem32[4]; + XXH32_hash_t memsize; + XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + + +#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + +struct XXH64_state_s { + XXH64_hash_t total_len; + XXH64_hash_t v1; + XXH64_hash_t v2; + XXH64_hash_t v3; + XXH64_hash_t v4; + XXH64_hash_t mem64[4]; + XXH32_hash_t memsize; + XXH32_hash_t reserved32; /* required for padding anyway */ + XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + + +/*-********************************************************************** +* XXH3 +* New experimental hash +************************************************************************/ + +/* ************************************************************************ + * XXH3 is a new hash algorithm featuring: + * - Improved speed for both small and large inputs + * - True 64-bit and 128-bit outputs + * - SIMD acceleration + * - Improved 32-bit viability + * + * Speed analysis methodology is explained here: + * + * https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html + * + * In general, expect XXH3 to run about ~2x faster on large inputs and >3x + * faster on small ones compared to XXH64, though exact differences depend on + * the platform. + * + * The algorithm is portable: Like XXH32 and XXH64, it generates the same hash + * on all platforms. + * + * It benefits greatly from SIMD and 64-bit arithmetic, but does not require it. 
+ * + * Almost all 32-bit and 64-bit targets that can run XXH32 smoothly can run + * XXH3 at competitive speeds, even if XXH64 runs slowly. Further details are + * explained in the implementation. + * + * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8, + * ZVector and scalar targets. This can be controlled with the XXH_VECTOR macro. + * + * XXH3 offers 2 variants, _64bits and _128bits. + * When only 64 bits are needed, prefer calling the _64bits variant, as it + * reduces the amount of mixing, resulting in faster speed on small inputs. + * + * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The 128-bit version adds additional strength, but it is slightly slower. + * + * The XXH3 algorithm is still in development. + * The results it produces may still change in future versions. + * + * Results produced by v0.7.x are not comparable with results from v0.7.y. + * However, the API is completely stable, and it can safely be used for + * ephemeral data (local sessions). + * + * Avoid storing values in long-term storage until the algorithm is finalized. + * XXH3's return values will be officially finalized upon reaching v0.8.0. + * + * After which, return values of XXH3 and XXH128 will no longer change in + * future versions. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. 
+ */ + +#ifdef XXH_NAMESPACE +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) + +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) + +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) + +# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +#endif + +/* XXH3_64bits(): + * default 64-bit variant, using default secret and default seed of 0. + * It's the fastest variant. */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len); + +/* + * XXH3_64bits_withSeed(): + * This variant generates a custom secret on the fly based on the default + * secret, altered using the `seed` value. + * While this operation is decently fast, note that it's not completely free. + * Note: seed==0 produces the same results as XXH3_64bits(). + */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); + +/* + * XXH3_64bits_withSecret(): + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * However, the quality of the hash highly depends on the secret's entropy. + * Technically, the secret must look like a bunch of random bytes. 
+ * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever unsure about the "randonmess" of the blob of bytes, + * consider relabelling it as a "custom seed" instead, + * and employ "XXH3_generateSecret()" (see below) + * to generate a high quality secret derived from this custom seed. + */ +#define XXH3_SECRET_SIZE_MIN 136 +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); + + +/* streaming 64-bit */ + +#if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */ +# include +# define XXH_ALIGN(n) alignas(n) +#elif defined(__GNUC__) +# define XXH_ALIGN(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +# define XXH_ALIGN(n) __declspec(align(n)) +#else +# define XXH_ALIGN(n) /* disabled */ +#endif + +/* Old GCC versions only accept the attribute after the type in structures. */ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && defined(__GNUC__) +# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) +#else +# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type +#endif + +typedef struct XXH3_state_s XXH3_state_t; + +#define XXH3_INTERNALBUFFER_SIZE 256 +#define XXH3_SECRET_DEFAULT_SIZE 192 +struct XXH3_state_s { + XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]); + /* used to store a custom secret generated from a seed */ + XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]); + XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]); + XXH32_hash_t bufferedSize; + XXH32_hash_t reserved32; + size_t nbStripesPerBlock; + size_t nbStripesSoFar; + size_t secretLimit; + XXH64_hash_t totalLen; + XXH64_hash_t seed; + XXH64_hash_t reserved64; + const unsigned char* extSecret; /* reference to external secret; + * if == NULL, use .customSecret instead */ + /* note: there may be some padding at the end due to alignment on 64 bytes */ +}; /* typedef'd to XXH3_state_t */ + 
+#undef XXH_ALIGN_MEMBER + +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever possible. + */ +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); +XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state); + + +/* + * XXH3_64bits_reset(): + * Initialize with the default parameters. + * The result will be equivalent to `XXH3_64bits()`. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr); +/* + * XXH3_64bits_reset_withSeed(): + * Generate a custom secret from `seed`, and store it into `statePtr`. + * digest will be equivalent to `XXH3_64bits_withSeed()`. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed); +/* + * XXH3_64bits_reset_withSecret(): + * `secret` is referenced, it _must outlive_ the hash streaming session. + * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`, + * and the quality of the hash depends on secret's entropy, + * meaning that the secret should look like a bunch of random bytes. + * When in doubt about the randomness of a candidate `secret`, + * consider employing `XXH3_generateSecret()` instead (see below). 
+ */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize); + +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr); + + +/* 128-bit */ + +#ifdef XXH_NAMESPACE +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) +# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) +# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret) + +# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) +# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) +# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) +# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) +# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) + +# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) +# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) +# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) +# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) +#endif + +typedef struct { + XXH64_hash_t low64; + XXH64_hash_t high64; +} XXH128_hash_t; + +XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed); +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len); +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); /* == XXH128() */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); + +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* 
statePtr, XXH64_hash_t seed); +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize); + +XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr); + + +/* Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */ + +/*! + * XXH128_isEqual(): + * Return: 1 if `h1` and `h2` are equal, 0 if they are not. + */ +XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2); + +/*! + * XXH128_cmp(): + * + * This comparator is compatible with stdlib's `qsort()`/`bsearch()`. + * + * return: >0 if *h128_1 > *h128_2 + * =0 if *h128_1 == *h128_2 + * <0 if *h128_1 < *h128_2 + */ +XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2); + + +/******* Canonical representation *******/ +typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; +XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash); +XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src); + + +/* === Experimental API === */ +/* Symbols defined below must be considered tied to a specific library version. */ + +/* + * XXH3_generateSecret(): + * + * Derive a high-entropy secret from any user-defined content, named customSeed. + * The generated secret can be used in combination with `*_withSecret()` functions. + * The `_withSecret()` variants are useful to provide a higher level of protection than 64-bit seed, + * as it becomes much more difficult for an external actor to guess how to impact the calculation logic. + * + * The function accepts as input a custom seed of any length and any content, + * and derives from it a high-entropy secret of length XXH3_SECRET_DEFAULT_SIZE + * into an already allocated buffer secretBuffer. 
+ * The generated secret is _always_ XXH_SECRET_DEFAULT_SIZE bytes long. + * + * The generated secret can then be used with any `*_withSecret()` variant. + * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`, + * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()` + * are part of this list. They all accept a `secret` parameter + * which must be very long for implementation reasons (>= XXH3_SECRET_SIZE_MIN) + * _and_ feature very high entropy (consist of random-looking bytes). + * These conditions can be a high bar to meet, so + * this function can be used to generate a secret of proper quality. + * + * customSeed can be anything. It can have any size, even small ones, + * and its content can be anything, even stupidly "low entropy" source such as a bunch of zeroes. + * The resulting `secret` will nonetheless provide all expected qualities. + * + * Supplying NULL as the customSeed copies the default secret into `secretBuffer`. + * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior. + */ +XXH_PUBLIC_API void XXH3_generateSecret(void* secretBuffer, const void* customSeed, size_t customSeedSize); + + +#endif /* XXH_NO_LONG_LONG */ + + +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# define XXH_IMPLEMENTATION +#endif + +#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ + + +/* ======================================================================== */ +/* ======================================================================== */ +/* ======================================================================== */ + + +/*-********************************************************************** + * xxHash implementation + *-********************************************************************** + * xxHash's implementation used to be found in xxhash.c. + * + * However, code inlining requires the implementation to be visible to the + * compiler, usually within the header. 
+ * + * As a workaround, xxhash.c used to be included within xxhash.h. This caused + * some issues with some build systems, especially ones which treat .c files + * as source files. + * + * Therefore, the implementation is now directly integrated within xxhash.h. + * Another small advantage is that xxhash.c is no longer needed in /include. + ************************************************************************/ + +#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ + || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) +# define XXH_IMPLEM_13a8737387 + +/* ************************************* +* Tuning parameters +***************************************/ +/*! + * XXH_FORCE_MEMORY_ACCESS: + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. + * + * Unfortunately, on some target/compiler combinations, the generated assembly + * is sub-optimal. + * + * The below switch allow to select a different access method for improved + * performance. + * Method 0 (default): + * Use `memcpy()`. Safe and portable. + * Method 1: + * `__attribute__((packed))` statement. It depends on compiler extensions + * and is therefore not portable. + * This method is safe if your compiler supports it, and *generally* as + * fast or faster than `memcpy`. + * Method 2: + * Direct access via cast. This method doesn't depend on the compiler but + * violates the C standard. + * It can generate buggy code on targets which do not support unaligned + * memory accesses. + * But in some circumstances, it's the only known way to get the most + * performance (ie GCC + ARMv6) + * Method 3: + * Byteshift. This can generate the best code on old compilers which don't + * inline small `memcpy()` calls, and it might also be faster on big-endian + * systems which lack a native byteswap instruction. + * See https://stackoverflow.com/a/32095106/646947 for details. 
+ * Prefer these methods in priority order (0 > 1 > 2 > 3) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7))) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*! + * XXH_ACCEPT_NULL_INPUT_POINTER: + * If the input pointer is NULL, xxHash's default behavior is to dereference it, + * triggering a segfault. + * When this macro is enabled, xxHash actively checks the input for a null pointer. + * If it is, the result for null input pointers is the same as a zero-length input. + */ +#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */ +# define XXH_ACCEPT_NULL_INPUT_POINTER 0 +#endif + +/*! + * XXH_FORCE_ALIGN_CHECK: + * This is an important performance trick + * for architectures without decent unaligned memory access performance. + * It checks for input alignment, and when conditions are met, + * uses a "fast path" employing direct 32-bit/64-bit read, + * resulting in _dramatically faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, but not zero. + * Moreover, it's not useful to generate binary for an additional code path + * if memory access uses same instruction for both aligned and unaligned adresses. + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * Align check is automatically disabled on x86, x64 & arm64, + * which are platforms known to offer good unaligned memory accesses performance. + * + * This option does not affect XXH3 (only XXH32 and XXH64). 
+ */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + +/*! + * XXH_NO_INLINE_HINTS: + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using + * -fno-inline with GCC or Clang, this will automatically be defined. + */ +#ifndef XXH_NO_INLINE_HINTS +# if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \ + || defined(__NO_INLINE__) /* -O0, -fno-inline */ +# define XXH_NO_INLINE_HINTS 1 +# else +# define XXH_NO_INLINE_HINTS 0 +# endif +#endif + +/*! + * XXH_REROLL: + * Whether to reroll XXH32_finalize, and XXH64_finalize, + * instead of using an unrolled jump table/if statement loop. + * + * This is automatically defined on -Os/-Oz on GCC and Clang. + */ +#ifndef XXH_REROLL +# if defined(__OPTIMIZE_SIZE__) +# define XXH_REROLL 1 +# else +# define XXH_REROLL 0 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/*! + * Modify the local functions below should you wish to use some other memory + * routines for malloc() and free() + */ +#include + +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free(void* p) { free(p); } + +/*! 
and for memcpy() */ +#include +static void* XXH_memcpy(void* dest, const void* src, size_t size) +{ + return memcpy(dest,src,size); +} + +#include /* ULLONG_MAX */ + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio warning fix */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + +#if XXH_NO_INLINE_HINTS /* disable inlining hints */ +# if defined(__GNUC__) +# define XXH_FORCE_INLINE static __attribute__((unused)) +# else +# define XXH_FORCE_INLINE static +# endif +# define XXH_NO_INLINE static +/* enable inlining hints */ +#elif defined(_MSC_VER) /* Visual Studio */ +# define XXH_FORCE_INLINE static __forceinline +# define XXH_NO_INLINE static __declspec(noinline) +#elif defined(__GNUC__) +# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) +# define XXH_NO_INLINE static __attribute__((noinline)) +#elif defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ +# define XXH_FORCE_INLINE static inline +# define XXH_NO_INLINE static +#else +# define XXH_FORCE_INLINE static +# define XXH_NO_INLINE static +#endif + + + +/* ************************************* +* Debug +***************************************/ +/* + * XXH_DEBUGLEVEL is expected to be defined externally, typically via the + * compiler's command line options. The value must be a number. 
+ */ +#ifndef XXH_DEBUGLEVEL +# ifdef DEBUGLEVEL /* backwards compat */ +# define XXH_DEBUGLEVEL DEBUGLEVEL +# else +# define XXH_DEBUGLEVEL 0 +# endif +#endif + +#if (XXH_DEBUGLEVEL>=1) +# include /* note: can still be disabled with NDEBUG */ +# define XXH_ASSERT(c) assert(c) +#else +# define XXH_ASSERT(c) ((void)0) +#endif + +/* note: use after variable declarations */ +#define XXH_STATIC_ASSERT(c) do { enum { XXH_sa = 1/(int)(!!(c)) }; } while (0) + + +/* ************************************* +* Basic Types +***************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + typedef uint8_t xxh_u8; +#else + typedef unsigned char xxh_u8; +#endif +typedef XXH32_hash_t xxh_u32; + +#ifdef XXH_OLD_NAMES +# define BYTE xxh_u8 +# define U8 xxh_u8 +# define U32 xxh_u32 +#endif + +/* *** Memory access *** */ + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __pack instructions are safer but compiler specific, hence potentially + * problematic for some compilers. + * + * Currently only defined for GCC and ICC. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign; + return ((const xxh_unalign*)ptr)->u32; +} + +#else + +/* + * Portable and safe solution. Generally efficient. 
+ * see: https://stackoverflow.com/a/32095106/646947 + */ +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* *** Endianess *** */ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/*! + * XXH_CPU_LITTLE_ENDIAN: + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, a runtime check (which is usually constant folded) + * is used instead. + */ +#ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +/* + * runtime test, presumed to simplify to a constant by compiler + */ +static int XXH_isLittleEndian(void) +{ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif + + + -#ifdef XXH_STATIC_LINKING_ONLY +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -/* ================================================================================================ - This section contains definitions which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. 
- They shall only be used with static linking. - Never use these definitions in association with dynamic linking ! -=================================================================================================== */ +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 +#endif + +#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ + && XXH_HAS_BUILTIN(__builtin_rotateleft64) +# define XXH_rotl32 __builtin_rotateleft32 +# define XXH_rotl64 __builtin_rotateleft64 +/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ +#elif defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static xxh_u32 XXH_swap32 (xxh_u32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +/* + * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. + * + * This is ideal for older compilers which don't inline memcpy. 
+ */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u32)bytePtr[1] << 8) + | ((xxh_u32)bytePtr[2] << 16) + | ((xxh_u32)bytePtr[3] << 24); +} + +XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[3] + | ((xxh_u32)bytePtr[2] << 8) + | ((xxh_u32)bytePtr[1] << 16) + | ((xxh_u32)bytePtr[0] << 24); +} + +#else +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} + +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? 
*(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} + + +/* ************************************* +* Misc +***************************************/ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +static const xxh_u32 XXH_PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */ +static const xxh_u32 XXH_PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */ +static const xxh_u32 XXH_PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */ +static const xxh_u32 XXH_PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */ +static const xxh_u32 XXH_PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif + +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) +{ + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; +#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * This inline assembly hack forces acc into a normal register. This is the + * only thing that prevents GCC and Clang from autovectorizing the XXH32 + * loop (pragmas and attributes don't work for some resason) without globally + * disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) making it slightly slower to multiply four integers at + * once compared to four integers independently. 
Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. + * + * How this hack works: + * __asm__("" // Declare an assembly block but don't declare any instructions + * : // However, as an Input/Output Operand, + * "+r" // constrain a read/write operand (+) as a general purpose register (r). + * (acc) // and set acc as the operand + * ); + * + * Because of the 'r', the compiler has promised that seed will be in a + * general purpose register and the '+' says that it will be 'read/write', + * so it has to assume it has changed. It is like volatile without all the + * loads and stores. + * + * Since the argument has to be in a normal register (not an SSE register), + * each time XXH32_round is called, it is impossible to vectorize. 
+ */ + __asm__("" : "+r" (acc)); +#endif + return acc; +} + +/* mix all bits */ +static xxh_u32 XXH32_avalanche(xxh_u32 h32) +{ + h32 ^= h32 >> 15; + h32 *= XXH_PRIME32_2; + h32 ^= h32 >> 13; + h32 *= XXH_PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +static xxh_u32 +XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + h32 += (*ptr++) * XXH_PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ +} while (0) + + /* Compact rerolled version */ + if (XXH_REROLL) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(h32); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + /* fallthrough */ + case 8: XXH_PROCESS4; + /* fallthrough */ + case 4: XXH_PROCESS4; + return XXH32_avalanche(h32); + + case 13: XXH_PROCESS4; + /* fallthrough */ + case 9: XXH_PROCESS4; + /* fallthrough */ + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(h32); + + case 14: XXH_PROCESS4; + /* fallthrough */ + case 10: XXH_PROCESS4; + /* fallthrough */ + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(h32); + + case 15: XXH_PROCESS4; + /* fallthrough */ + case 11: XXH_PROCESS4; + /* fallthrough */ + case 7: XXH_PROCESS4; + /* fallthrough */ + case 3: XXH_PROCESS1; + /* fallthrough */ + case 2: XXH_PROCESS1; + /* fallthrough */ + case 1: XXH_PROCESS1; + /* fallthrough */ + case 0: return XXH32_avalanche(h32); + } + XXH_ASSERT(0); + return h32; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +XXH_FORCE_INLINE 
xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + const xxh_u8* bEnd = input + len; + xxh_u32 h32; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (input==NULL) { + len=0; + bEnd=input=(const xxh_u8*)(size_t)16; + } +#endif + + if (len>=16) { + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + XXH_PRIME32_5; + } + + h32 += (xxh_u32)len; + + return XXH32_finalize(h32, input, len&15, align); +} + + +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); + +#else + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +#endif +} + + + +/******* Hash streaming *******/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const 
XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + state.v2 = seed + XXH_PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - XXH_PRIME32_1; + /* do not write into reserved, planned to be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved)); + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + xxh_u32 v1 = state->v1; + xxh_u32 v2 = state->v2; + xxh_u32 v3 = state->v3; + xxh_u32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4; + v2 
= XXH32_round(v2, XXH_readLE32(p)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4; + } while (p<=limit); -/* These definitions are only meant to allow allocation of XXH state - statically, on stack, or in a struct for example. - Do not use members directly. */ + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state) +{ + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + + XXH_rotl32(state->v2, 7) + + XXH_rotl32(state->v3, 12) + + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + XXH_PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} + + +/******* Canonical representation *******/ + +/* + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * + * The canonical representation uses big endian convention, the same convention + * as human-readable numbers (large digits first). + * + * This way, hash values can be written into a file or buffer, remaining + * comparable across different systems. + * + * The following functions allow transformation of hash values to and from their + * canonical format. 
+ */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} - struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; /* buffer defined as U32 for alignment */ - unsigned memsize; - unsigned reserved; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH32_state_t */ #ifndef XXH_NO_LONG_LONG - struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ - unsigned memsize; - unsigned reserved[2]; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH64_state_t */ -#endif - -# ifdef XXH_PRIVATE_API -# include "xxhash.c" /* include xxhash function bodies as `static`, for inlining */ + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ + +/******* Memory access *******/ + +typedef XXH64_hash_t xxh_u64; + +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif + +/*! + * XXH_REROLL_XXH64: + * Whether to reroll the XXH64_finalize() loop. + * + * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a + * performance gain on 64-bit hosts, as only one jump is required. + * + * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit + * registers, and 64-bit arithmetic needs to be simulated, it isn't beneficial + * to unroll. 
The code becomes ridiculously large (the largest function in the + * binary on i386!), and rerolling it saves anywhere from 3kB to 20kB. It is + * also slightly faster because it fits into cache better and is more likely + * to be inlined by the compiler. + * + * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. + */ +#ifndef XXH_REROLL_XXH64 +# if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \ + || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \ + || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \ + || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \ + || defined(__mips64__) || defined(__mips64)) /* mips64 */ \ + || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */ +# define XXH_REROLL_XXH64 1 +# else +# define XXH_REROLL_XXH64 0 # endif +#endif /* !defined(XXH_REROLL_XXH64) */ -#endif /* XXH_STATIC_LINKING_ONLY */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; } -#if defined (__cplusplus) +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __pack instructions are safer, but compiler specific, hence potentially + * problematic for some compilers. + * + * Currently only defined for GCC and ICC. 
+ */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64; + return ((const xxh_unalign64*)ptr)->u64; +} + +#else + +/* + * Portable and safe solution. Generally efficient. + * see: https://stackoverflow.com/a/32095106/646947 + */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static xxh_u64 XXH_swap64 (xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); } #endif -#endif /* XXHASH_H_5627135585666179 */ + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. 
*/ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); +} + +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? 
*(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); +} + + +/******* xxh64 *******/ + +static const xxh_u64 XXH_PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */ +static const xxh_u64 XXH_PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */ +static const xxh_u64 XXH_PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */ +static const xxh_u64 XXH_PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */ +static const xxh_u64 XXH_PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +#ifdef XXH_OLD_NAMES +# define PRIME64_1 XXH_PRIME64_1 +# define PRIME64_2 XXH_PRIME64_2 +# define PRIME64_3 XXH_PRIME64_3 +# define PRIME64_4 XXH_PRIME64_4 +# define PRIME64_5 XXH_PRIME64_5 +#endif + +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) +{ + acc += input * XXH_PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= XXH_PRIME64_1; + return acc; +} + +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; + return acc; +} + +static xxh_u64 XXH64_avalanche(xxh_u64 h64) +{ + h64 ^= h64 >> 33; + h64 *= XXH_PRIME64_2; + h64 ^= h64 >> 29; + h64 *= XXH_PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, align) + +static xxh_u64 +XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1_64 do { \ + h64 ^= (*ptr++) * XXH_PRIME64_5; \ + h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; \ +} while (0) + +#define XXH_PROCESS4_64 do { \ + h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; \ + ptr += 4; \ + h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; \ +} while (0) + +#define XXH_PROCESS8_64 do { \ + xxh_u64 const k1 = 
XXH64_round(0, XXH_get64bits(ptr)); \ + ptr += 8; \ + h64 ^= k1; \ + h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; \ +} while (0) + + /* Rerolled version for 32-bit targets is faster and much smaller. */ + if (XXH_REROLL || XXH_REROLL_XXH64) { + len &= 31; + while (len >= 8) { + XXH_PROCESS8_64; + len -= 8; + } + if (len >= 4) { + XXH_PROCESS4_64; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1_64; + --len; + } + return XXH64_avalanche(h64); + } else { + switch(len & 31) { + case 24: XXH_PROCESS8_64; + /* fallthrough */ + case 16: XXH_PROCESS8_64; + /* fallthrough */ + case 8: XXH_PROCESS8_64; + return XXH64_avalanche(h64); + + case 28: XXH_PROCESS8_64; + /* fallthrough */ + case 20: XXH_PROCESS8_64; + /* fallthrough */ + case 12: XXH_PROCESS8_64; + /* fallthrough */ + case 4: XXH_PROCESS4_64; + return XXH64_avalanche(h64); + + case 25: XXH_PROCESS8_64; + /* fallthrough */ + case 17: XXH_PROCESS8_64; + /* fallthrough */ + case 9: XXH_PROCESS8_64; + XXH_PROCESS1_64; + return XXH64_avalanche(h64); + + case 29: XXH_PROCESS8_64; + /* fallthrough */ + case 21: XXH_PROCESS8_64; + /* fallthrough */ + case 13: XXH_PROCESS8_64; + /* fallthrough */ + case 5: XXH_PROCESS4_64; + XXH_PROCESS1_64; + return XXH64_avalanche(h64); + + case 26: XXH_PROCESS8_64; + /* fallthrough */ + case 18: XXH_PROCESS8_64; + /* fallthrough */ + case 10: XXH_PROCESS8_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; + return XXH64_avalanche(h64); + + case 30: XXH_PROCESS8_64; + /* fallthrough */ + case 22: XXH_PROCESS8_64; + /* fallthrough */ + case 14: XXH_PROCESS8_64; + /* fallthrough */ + case 6: XXH_PROCESS4_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; + return XXH64_avalanche(h64); + + case 27: XXH_PROCESS8_64; + /* fallthrough */ + case 19: XXH_PROCESS8_64; + /* fallthrough */ + case 11: XXH_PROCESS8_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; + XXH_PROCESS1_64; + return XXH64_avalanche(h64); + + case 31: XXH_PROCESS8_64; + /* fallthrough */ + case 23: XXH_PROCESS8_64; + /* fallthrough */ + 
case 15: XXH_PROCESS8_64; + /* fallthrough */ + case 7: XXH_PROCESS4_64; + /* fallthrough */ + case 3: XXH_PROCESS1_64; + /* fallthrough */ + case 2: XXH_PROCESS1_64; + /* fallthrough */ + case 1: XXH_PROCESS1_64; + /* fallthrough */ + case 0: return XXH64_avalanche(h64); + } + } + /* impossible to reach */ + XXH_ASSERT(0); + return 0; /* unreachable, but some compilers complain without it */ +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1_64 XXH_PROCESS1_64 +# define PROCESS4_64 XXH_PROCESS4_64 +# define PROCESS8_64 XXH_PROCESS8_64 +#else +# undef XXH_PROCESS1_64 +# undef XXH_PROCESS4_64 +# undef XXH_PROCESS8_64 +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) +{ + const xxh_u8* bEnd = input + len; + xxh_u64 h64; + +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + if (input==NULL) { + len=0; + bEnd=input=(const xxh_u8*)(size_t)32; + } +#endif + + if (len>=32) { + const xxh_u8* const limit = bEnd - 32; + xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + xxh_u64 v2 = seed + XXH_PRIME64_2; + xxh_u64 v3 = seed + 0; + xxh_u64 v4 = seed - XXH_PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; + v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8; + v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8; + v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8; + } while (input<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + XXH_PRIME64_5; + } + + h64 += (xxh_u64) len; + + return XXH64_finalize(h64, input, len, align); +} + + +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t 
state; + XXH64_reset(&state, seed); + XXH64_update(&state, (const xxh_u8*)input, len); + return XXH64_digest(&state); + +#else + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); + +#endif +} + +/******* Hash Streaming *******/ + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed) +{ + XXH64_state_t state; /* use a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)); + state.v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + state.v2 = seed + XXH_PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - XXH_PRIME64_1; + /* do not write into reserved64, might be removed in a future version */ + memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64)); + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH64_state_t* state, const void* input, size_t len) +{ + if (input==NULL) +#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1) + return XXH_OK; +#else + return XXH_ERROR; +#endif + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + 
XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + xxh_u64 v1 = state->v1; + xxh_u64 v2 = state->v2; + xxh_u64 v3 = state->v3; + xxh_u64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + xxh_u64 const v1 = state->v1; + xxh_u64 const v2 = state->v2; + xxh_u64 const v3 = state->v3; + xxh_u64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} + + +/******* Canonical representation *******/ + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); 
+} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#endif /* XXH_NO_LONG_LONG */ + + +#endif /* XXH_IMPLEMENTATION */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/borg/algorithms/xxhash-libselect.h b/src/borg/algorithms/xxhash-libselect.h new file mode 100644 index 0000000000..365e677ffd --- /dev/null +++ b/src/borg/algorithms/xxhash-libselect.h @@ -0,0 +1,5 @@ +#ifdef BORG_USE_LIBXXHASH +#include +#else +#include "xxh64/xxhash.c" +#endif diff --git a/src/borg/algorithms/zstd-libselect.h b/src/borg/algorithms/zstd-libselect.h new file mode 100644 index 0000000000..9a4ded364b --- /dev/null +++ b/src/borg/algorithms/zstd-libselect.h @@ -0,0 +1,5 @@ +#ifdef BORG_USE_LIBZSTD +#include +#else +#include "zstd/lib/zstd.h" +#endif diff --git a/src/borg/algorithms/zstd/lib/common/bitstream.h b/src/borg/algorithms/zstd/lib/common/bitstream.h new file mode 100644 index 0000000000..37b99c01ee --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/bitstream.h @@ -0,0 +1,454 @@ +/* ****************************************************************** + * bitstream + * Part of FSE library + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +****************************************************************** */ +#ifndef BITSTREAM_H_MODULE +#define BITSTREAM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/* +* This API consists of small unitary functions, which must be inlined for best performance. 
+* Since link-time-optimization is not available for all compilers, +* these functions are defined into a .h to be included. +*/ + +/*-**************************************** +* Dependencies +******************************************/ +#include "mem.h" /* unaligned access routines */ +#include "compiler.h" /* UNLIKELY() */ +#include "debug.h" /* assert(), DEBUGLOG(), RAWLOG() */ +#include "error_private.h" /* error codes and messages */ + + +/*========================================= +* Target specific +=========================================*/ +#if defined(__BMI__) && defined(__GNUC__) +# include /* support for bextr (experimental) */ +#elif defined(__ICCARM__) +# include +#endif + +#define STREAM_ACCUMULATOR_MIN_32 25 +#define STREAM_ACCUMULATOR_MIN_64 57 +#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) + + +/*-****************************************** +* bitStream encoding API (write forward) +********************************************/ +/* bitStream can mix input from multiple sources. + * A critical property of these streams is that they encode and decode in **reverse** direction. + * So the first bit sequence you add will be the last to be read, like a LIFO stack. + */ +typedef struct { + size_t bitContainer; + unsigned bitPos; + char* startPtr; + char* ptr; + char* endPtr; +} BIT_CStream_t; + +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); + +/* Start with initCStream, providing the size of buffer to write into. +* bitStream will never write outside of this buffer. +* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. +* +* bits are first added to a local register. 
+* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. +* Writing data into memory is an explicit operation, performed by the flushBits function. +* Hence keep track how many bits are potentially stored into local register to avoid register overflow. +* After a flushBits, a maximum of 7 bits might still be stored into local register. +* +* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. +* +* Last operation is to close the bitStream. +* The function returns the final size of CStream in bytes. +* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) +*/ + + +/*-******************************************** +* bitStream decoding API (read backward) +**********************************************/ +typedef struct { + size_t bitContainer; + unsigned bitsConsumed; + const char* ptr; + const char* start; + const char* limitPtr; +} BIT_DStream_t; + +typedef enum { BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ + /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ + +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + + +/* Start by invoking BIT_initDStream(). +* A chunk of the bitStream is then stored into a local register. +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +* You can then retrieve bitFields stored into the local register, **in reverse order**. +* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. 
+* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. +* Otherwise, it can be less than that, so proceed accordingly. +* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). +*/ + + +/*-**************************************** +* unsafe API +******************************************/ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ + +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); +/* unsafe version; does not check buffer overflow */ + +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); +/* faster, but works only if nbBits >= 1 */ + + + +/*-************************************************************** +* Internal functions +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (U32 val) +{ + assert(val != 0); + { +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + return _BitScanReverse ( &r, val ) ? 
(unsigned)r : 0; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ + return __builtin_clz (val) ^ 31; +# elif defined(__ICCARM__) /* IAR Intrinsic */ + return 31 - __CLZ(val); +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, + 11, 14, 16, 18, 22, 25, 3, 30, + 8, 12, 20, 28, 15, 17, 24, 7, + 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; +# endif + } +} + +/*===== Local Constants =====*/ +static const unsigned BIT_mask[] = { + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, + 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, + 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF, 0x7FFFFFF, 0xFFFFFFF, 0x1FFFFFFF, + 0x3FFFFFFF, 0x7FFFFFFF}; /* up to 31 bits */ +#define BIT_MASK_SIZE (sizeof(BIT_mask) / sizeof(BIT_mask[0])) + +/*-************************************************************** +* bitStream encoding +****************************************************************/ +/*! BIT_initCStream() : + * `dstCapacity` must be > sizeof(size_t) + * @return : 0 if success, + * otherwise an error code (can be tested using ERR_isError()) */ +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, + void* startPtr, size_t dstCapacity) +{ + bitC->bitContainer = 0; + bitC->bitPos = 0; + bitC->startPtr = (char*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer); + if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall); + return 0; +} + +/*! BIT_addBits() : + * can add up to 31 bits into `bitC`. + * Note : does not check for register overflow ! 
*/ +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) +{ + MEM_STATIC_ASSERT(BIT_MASK_SIZE == 32); + assert(nbBits < BIT_MASK_SIZE); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); + bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_addBitsFast() : + * works only if `value` is _clean_, + * meaning all high bits above nbBits are 0 */ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, + size_t value, unsigned nbBits) +{ + assert((value>>nbBits) == 0); + assert(nbBits + bitC->bitPos < sizeof(bitC->bitContainer) * 8); + bitC->bitContainer |= value << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_flushBitsFast() : + * assumption : bitContainer has not overflowed + * unsafe version; does not check buffer overflow */ +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(bitC->ptr <= bitC->endPtr); + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; +} + +/*! BIT_flushBits() : + * assumption : bitContainer has not overflowed + * safe version; check for buffer overflow, and prevents it. + * note : does not signal buffer overflow. + * overflow will be revealed later on using BIT_closeCStream() */ +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + assert(bitC->bitPos < sizeof(bitC->bitContainer) * 8); + assert(bitC->ptr <= bitC->endPtr); + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; +} + +/*! 
BIT_closeCStream() : + * @return : size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) +{ + BIT_addBitsFast(bitC, 1, 1); /* endMark */ + BIT_flushBits(bitC); + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); +} + + +/*-******************************************************** +* bitStream decoding +**********************************************************/ +/*! BIT_initDStream() : + * Initialize a BIT_DStream_t. + * `bitD` : a pointer to an already allocated BIT_DStream_t structure. + * `srcSize` must be the *exact* size of the bitStream, in bytes. + * @return : size of stream (== srcSize), or an errorCode if a problem is detected + */ +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + + bitD->start = (const char*)srcBuffer; + bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer); + + if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } + } else { + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + /* fall-through */ + + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + /* fall-through */ + + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + /* fall-through */ + + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; + /* fall-through */ + + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; + /* fall-through */ + + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; + /* fall-through */ + + default: break; + } + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + if (lastByte == 0) return ERROR(corruption_detected); /* endMark not present */ + } + bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; + } + + return srcSize; +} + +MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) +{ + return bitContainer >> start; +} + +MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) +{ + U32 const regMask = sizeof(bitContainer)*8 - 1; + /* if start > regMask, bitstream is corrupted, and result is undefined */ + assert(nbBits < BIT_MASK_SIZE); + return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; +} + +MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +{ + assert(nbBits < BIT_MASK_SIZE); + return bitContainer & BIT_mask[nbBits]; +} + +/*! BIT_lookBits() : + * Provides next n bits from local register. 
+ * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted */ +MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) +{ + /* arbitrate between double-shift and shift+mask */ +#if 1 + /* if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8, + * bitstream is likely corrupted, and result is undefined */ + return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits); +#else + /* this code path is slower on my os-x laptop */ + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask); +#endif +} + +/*! BIT_lookBitsFast() : + * unsafe version; only works if nbBits >= 1 */ +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +{ + U32 const regMask = sizeof(bitD->bitContainer)*8 - 1; + assert(nbBits >= 1); + return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask); +} + +MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + +/*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. */ +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits) +{ + size_t const value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_readBitsFast() : + * unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits) +{ + size_t const value = BIT_lookBitsFast(bitD, nbBits); + assert(nbBits >= 1); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_reloadDStreamFast() : + * Similar to BIT_reloadDStream(), but with two differences: + * 1. bitsConsumed <= sizeof(bitD->bitContainer)*8 must hold! 
+ * 2. Returns BIT_DStream_overflow when bitD->ptr < bitD->limitPtr, at this + * point you must use BIT_reloadDStream() to reload. + */ +MEM_STATIC BIT_DStream_status BIT_reloadDStreamFast(BIT_DStream_t* bitD) +{ + if (UNLIKELY(bitD->ptr < bitD->limitPtr)) + return BIT_DStream_overflow; + assert(bitD->bitsConsumed <= sizeof(bitD->bitContainer)*8); + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; +} + +/*! BIT_reloadDStream() : + * Refill `bitD` from buffer previously set in BIT_initDStream() . + * This function is safe, it guarantees it will not read beyond src buffer. + * @return : status of `BIT_DStream_t` internal register. + * when status == BIT_DStream_unfinished, internal register is filled with at least 25 or 57 bits */ +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* overflow detected, like end of stream */ + return BIT_DStream_overflow; + + if (bitD->ptr >= bitD->limitPtr) { + return BIT_reloadDStreamFast(bitD); + } + if (bitD->ptr == bitD->start) { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } + /* start < ptr < limitPtr */ + { U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = BIT_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */ + return result; + } +} + +/*! BIT_endOfDStream() : + * @return : 1 if DStream has _exactly_ reached its end (all bits consumed). 
+ */ +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) +{ + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* BITSTREAM_H_MODULE */ diff --git a/src/borg/algorithms/zstd/lib/common/compiler.h b/src/borg/algorithms/zstd/lib/common/compiler.h new file mode 100644 index 0000000000..95e9483521 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/compiler.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMPILER_H +#define ZSTD_COMPILER_H + +/*-******************************************************* +* Compiler specifics +*********************************************************/ +/* force inlining */ + +#if !defined(ZSTD_NO_INLINE) +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# define INLINE_KEYWORD inline +#else +# define INLINE_KEYWORD +#endif + +#if defined(__GNUC__) || defined(__ICCARM__) +# define FORCE_INLINE_ATTR __attribute__((always_inline)) +#elif defined(_MSC_VER) +# define FORCE_INLINE_ATTR __forceinline +#else +# define FORCE_INLINE_ATTR +#endif + +#else + +#define INLINE_KEYWORD +#define FORCE_INLINE_ATTR + +#endif + +/** + * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant + * parameters. They must be inlined for the compiler to eliminate the constant + * branches. + */ +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR +/** + * HINT_INLINE is used to help the compiler generate better code. 
It is *not* + * used for "templates", so it can be tweaked based on the compilers + * performance. + * + * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the + * always_inline attribute. + * + * clang up to 5.0.0 (trunk) benefit tremendously from the always_inline + * attribute. + */ +#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5 +# define HINT_INLINE static INLINE_KEYWORD +#else +# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR +#endif + +/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */ +#if defined(__GNUC__) +# define UNUSED_ATTR __attribute__((unused)) +#else +# define UNUSED_ATTR +#endif + +/* force no inlining */ +#ifdef _MSC_VER +# define FORCE_NOINLINE static __declspec(noinline) +#else +# if defined(__GNUC__) || defined(__ICCARM__) +# define FORCE_NOINLINE static __attribute__((__noinline__)) +# else +# define FORCE_NOINLINE static +# endif +#endif + +/* target attribute */ +#ifndef __has_attribute + #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */ +#endif +#if defined(__GNUC__) || defined(__ICCARM__) +# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) +#else +# define TARGET_ATTRIBUTE(target) +#endif + +/* Enable runtime BMI2 dispatch based on the CPU. + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. 
+ */ +#ifndef DYNAMIC_BMI2 + #if ((defined(__clang__) && __has_attribute(__target__)) \ + || (defined(__GNUC__) \ + && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ + && (defined(__x86_64__) || defined(_M_X86)) \ + && !defined(__BMI2__) + # define DYNAMIC_BMI2 1 + #else + # define DYNAMIC_BMI2 0 + #endif +#endif + +/* prefetch + * can be disabled, by declaring NO_PREFETCH build macro */ +#if defined(NO_PREFETCH) +# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ +# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +#else +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_I86)) /* _mm_prefetch() is not defined outside of x86/x64 */ +# include /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1) +# elif defined(__aarch64__) +# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr))) +# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr))) +# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) +# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */) +# else +# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */ +# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */ +# endif +#endif /* NO_PREFETCH */ + +#define CACHELINE_SIZE 64 + +#define PREFETCH_AREA(p, s) { \ + const char* const _ptr = (const char*)(p); \ + size_t const _size = (size_t)(s); \ + size_t _pos; \ + for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) { \ + PREFETCH_L2(_ptr + _pos); \ + } \ +} + +/* vectorization + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */ +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) +# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || 
(__GNUC__ >= 5) +# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) +# else +# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")") +# endif +#else +# define DONT_VECTORIZE +#endif + +/* Tell the compiler that a branch is likely or unlikely. + * Only use these macros if it causes the compiler to generate better code. + * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc + * and clang, please do. + */ +#if defined(__GNUC__) +#define LIKELY(x) (__builtin_expect((x), 1)) +#define UNLIKELY(x) (__builtin_expect((x), 0)) +#else +#define LIKELY(x) (x) +#define UNLIKELY(x) (x) +#endif + +/* disable warnings */ +#ifdef _MSC_VER /* Visual Studio */ +# include /* For Visual 2005 */ +# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +# pragma warning(disable : 4324) /* disable: C4324: padded structure */ +#endif + +#endif /* ZSTD_COMPILER_H */ diff --git a/src/borg/algorithms/zstd/lib/common/cpu.h b/src/borg/algorithms/zstd/lib/common/cpu.h new file mode 100644 index 0000000000..6e8a974f62 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/cpu.h @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2018-2020, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */
+
+#ifndef ZSTD_COMMON_CPU_H
+#define ZSTD_COMMON_CPU_H
+
+/**
+ * Implementation taken from folly/CpuId.h
+ * https://github.com/facebook/folly/blob/master/folly/CpuId.h
+ */
+
+#include <stddef.h>
+
+#include "mem.h"
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+typedef struct {
+    U32 f1c;
+    U32 f1d;
+    U32 f7b;
+    U32 f7c;
+} ZSTD_cpuid_t;
+
+MEM_STATIC ZSTD_cpuid_t ZSTD_cpuid(void) {
+    U32 f1c = 0;
+    U32 f1d = 0;
+    U32 f7b = 0;
+    U32 f7c = 0;
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
+    int reg[4];
+    __cpuid((int*)reg, 0);
+    {
+        int const n = reg[0];
+        if (n >= 1) {
+            __cpuid((int*)reg, 1);
+            f1c = (U32)reg[2];
+            f1d = (U32)reg[3];
+        }
+        if (n >= 7) {
+            __cpuidex((int*)reg, 7, 0);
+            f7b = (U32)reg[1];
+            f7c = (U32)reg[2];
+        }
+    }
+#elif defined(__i386__) && defined(__PIC__) && !defined(__clang__) && defined(__GNUC__)
+    /* The following block like the normal cpuid branch below, but gcc
+     * reserves ebx for use of its pic register so we must specially
+     * handle the save and restore to avoid clobbering the register
+     */
+    U32 n;
+    __asm__(
+        "pushl %%ebx\n\t"
+        "cpuid\n\t"
+        "popl %%ebx\n\t"
+        : "=a"(n)
+        : "a"(0)
+        : "ecx", "edx");
+    if (n >= 1) {
+      U32 f1a;
+      __asm__(
+          "pushl %%ebx\n\t"
+          "cpuid\n\t"
+          "popl %%ebx\n\t"
+          : "=a"(f1a), "=c"(f1c), "=d"(f1d)
+          : "a"(1));
+    }
+    if (n >= 7) {
+      __asm__(
+          "pushl %%ebx\n\t"
+          "cpuid\n\t"
+          "movl %%ebx, %%eax\n\t"
+          "popl %%ebx"
+          : "=a"(f7b), "=c"(f7c)
+          : "a"(7), "c"(0)
+          : "edx");
+    }
+#elif defined(__x86_64__) || defined(_M_X64) || defined(__i386__)
+    U32 n;
+    __asm__("cpuid" : "=a"(n) : "a"(0) : "ebx", "ecx", "edx");
+    if (n >= 1) {
+      U32 f1a;
+      __asm__("cpuid" : "=a"(f1a), "=c"(f1c), "=d"(f1d) : "a"(1) : "ebx");
+    }
+    if (n >= 7) {
+      U32 f7a;
+      __asm__("cpuid"
+              : "=a"(f7a), "=b"(f7b), "=c"(f7c)
+              : "a"(7), "c"(0)
+              : "edx");
+    }
+#endif
+    {
+        ZSTD_cpuid_t cpuid;
+        cpuid.f1c = f1c;
+        cpuid.f1d = f1d;
+        cpuid.f7b = f7b;
+        cpuid.f7c = f7c;
+        return cpuid;
+    }
+}
+
+#define X(name, r, bit)                                                        \
+  MEM_STATIC int 
ZSTD_cpuid_##name(ZSTD_cpuid_t const cpuid) { \ + return ((cpuid.r) & (1U << bit)) != 0; \ + } + +/* cpuid(1): Processor Info and Feature Bits. */ +#define C(name, bit) X(name, f1c, bit) + C(sse3, 0) + C(pclmuldq, 1) + C(dtes64, 2) + C(monitor, 3) + C(dscpl, 4) + C(vmx, 5) + C(smx, 6) + C(eist, 7) + C(tm2, 8) + C(ssse3, 9) + C(cnxtid, 10) + C(fma, 12) + C(cx16, 13) + C(xtpr, 14) + C(pdcm, 15) + C(pcid, 17) + C(dca, 18) + C(sse41, 19) + C(sse42, 20) + C(x2apic, 21) + C(movbe, 22) + C(popcnt, 23) + C(tscdeadline, 24) + C(aes, 25) + C(xsave, 26) + C(osxsave, 27) + C(avx, 28) + C(f16c, 29) + C(rdrand, 30) +#undef C +#define D(name, bit) X(name, f1d, bit) + D(fpu, 0) + D(vme, 1) + D(de, 2) + D(pse, 3) + D(tsc, 4) + D(msr, 5) + D(pae, 6) + D(mce, 7) + D(cx8, 8) + D(apic, 9) + D(sep, 11) + D(mtrr, 12) + D(pge, 13) + D(mca, 14) + D(cmov, 15) + D(pat, 16) + D(pse36, 17) + D(psn, 18) + D(clfsh, 19) + D(ds, 21) + D(acpi, 22) + D(mmx, 23) + D(fxsr, 24) + D(sse, 25) + D(sse2, 26) + D(ss, 27) + D(htt, 28) + D(tm, 29) + D(pbe, 31) +#undef D + +/* cpuid(7): Extended Features. 
*/ +#define B(name, bit) X(name, f7b, bit) + B(bmi1, 3) + B(hle, 4) + B(avx2, 5) + B(smep, 7) + B(bmi2, 8) + B(erms, 9) + B(invpcid, 10) + B(rtm, 11) + B(mpx, 14) + B(avx512f, 16) + B(avx512dq, 17) + B(rdseed, 18) + B(adx, 19) + B(smap, 20) + B(avx512ifma, 21) + B(pcommit, 22) + B(clflushopt, 23) + B(clwb, 24) + B(avx512pf, 26) + B(avx512er, 27) + B(avx512cd, 28) + B(sha, 29) + B(avx512bw, 30) + B(avx512vl, 31) +#undef B +#define C(name, bit) X(name, f7c, bit) + C(prefetchwt1, 0) + C(avx512vbmi, 1) +#undef C + +#undef X + +#endif /* ZSTD_COMMON_CPU_H */ diff --git a/src/borg/algorithms/zstd/lib/common/debug.c b/src/borg/algorithms/zstd/lib/common/debug.c new file mode 100644 index 0000000000..f303f4a2e5 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/debug.c @@ -0,0 +1,24 @@ +/* ****************************************************************** + * debug + * Part of FSE library + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+****************************************************************** */ + + +/* + * This module only hosts one global variable + * which can be used to dynamically influence the verbosity of traces, + * such as DEBUGLOG and RAWLOG + */ + +#include "debug.h" + +int g_debuglevel = DEBUGLEVEL; diff --git a/src/borg/algorithms/zstd/lib/common/debug.h b/src/borg/algorithms/zstd/lib/common/debug.h new file mode 100644 index 0000000000..ac6224888d --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/debug.h @@ -0,0 +1,114 @@ +/* ****************************************************************** + * debug + * Part of FSE library + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +****************************************************************** */ + + +/* + * The purpose of this header is to enable debug functions. + * They regroup assert(), DEBUGLOG() and RAWLOG() for run-time, + * and DEBUG_STATIC_ASSERT() for compile-time. + * + * By default, DEBUGLEVEL==0, which means run-time debug is disabled. + * + * Level 1 enables assert() only. + * Starting level 2, traces can be generated and pushed to stderr. + * The higher the level, the more verbose the traces. + * + * It's possible to dynamically adjust level using variable g_debug_level, + * which is only declared if DEBUGLEVEL>=2, + * and is a global variable, not multi-thread protected (use with care) + */ + +#ifndef DEBUG_H_12987983217 +#define DEBUG_H_12987983217 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* static assert is triggered at compile time, leaving no runtime artefact. 
+ * static assert only works with compile-time constants.
+ * Also, this variant can only be used inside a function. */
+#define DEBUG_STATIC_ASSERT(c) (void)sizeof(char[(c) ? 1 : -1])
+
+
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through compiler command line.
+ * Value must be a number. */
+#ifndef DEBUGLEVEL
+#  define DEBUGLEVEL 0
+#endif
+
+
+/* DEBUGFILE can be defined externally,
+ * typically through compiler command line.
+ * note : currently useless.
+ * Value must be stderr or stdout */
+#ifndef DEBUGFILE
+#  define DEBUGFILE stderr
+#endif
+
+
+/* recommended values for DEBUGLEVEL :
+ * 0 : release mode, no debug, all run-time checks disabled
+ * 1 : enables assert() only, no display
+ * 2 : reserved, for currently active debug path
+ * 3 : events once per object lifetime (CCtx, CDict, etc.)
+ * 4 : events once per frame
+ * 5 : events once per block
+ * 6 : events once per sequence (verbose)
+ * 7+: events at every position (*very* verbose)
+ *
+ * It's generally inconvenient to output traces > 5.
+ * In which case, it's possible to selectively trigger high verbosity levels
+ * by modifying g_debug_level.
+ */
+
+#if (DEBUGLEVEL>=1)
+#  include <assert.h>
+#else
+#  ifndef assert   /* assert may be already defined, due to prior #include <assert.h> */
+#    define assert(condition) ((void)0)   /* disable assert (default) */
+#  endif
+#endif
+
+#if (DEBUGLEVEL>=2)
+#  include <stdio.h>
+extern int g_debuglevel; /* the variable is only declared,
+                            it actually lives in debug.c,
+                            and is shared by the whole process.
+                            It's not thread-safe.
+                            It's useful when enabling very verbose levels
+                            on selective conditions (such as position in src) */
+
+#  define RAWLOG(l, ...) {                                       \
+                if (l<=g_debuglevel) {                           \
+                    fprintf(stderr, __VA_ARGS__);                \
+            }   }
+#  define DEBUGLOG(l, ...) {                                     \
+                if (l<=g_debuglevel) {                           \
+                    fprintf(stderr, __FILE__ ": " __VA_ARGS__);  \
+                    fprintf(stderr, " \n");                      \
+            }   }
+#else
+#  define RAWLOG(l, ...)      {}    /* disabled */
+#  define DEBUGLOG(l, ...) 
{} /* disabled */ +#endif + + +#if defined (__cplusplus) +} +#endif + +#endif /* DEBUG_H_12987983217 */ diff --git a/src/borg/algorithms/zstd/lib/common/entropy_common.c b/src/borg/algorithms/zstd/lib/common/entropy_common.c new file mode 100644 index 0000000000..9d3e4e8e36 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/entropy_common.c @@ -0,0 +1,216 @@ +/* ****************************************************************** + * Common functions of New Generation Entropy library + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+****************************************************************** */ + +/* ************************************* +* Dependencies +***************************************/ +#include "mem.h" +#include "error_private.h" /* ERR_*, ERROR */ +#define FSE_STATIC_LINKING_ONLY /* FSE_MIN_TABLELOG */ +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY /* HUF_TABLELOG_ABSOLUTEMAX */ +#include "huf.h" + + +/*=== Version ===*/ +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } + + +/*=== Error Management ===*/ +unsigned FSE_isError(size_t code) { return ERR_isError(code); } +const char* FSE_getErrorName(size_t code) { return ERR_getErrorName(code); } + +unsigned HUF_isError(size_t code) { return ERR_isError(code); } +const char* HUF_getErrorName(size_t code) { return ERR_getErrorName(code); } + + +/*-************************************************************** +* FSE NCount encoding-decoding +****************************************************************/ +size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + const BYTE* const istart = (const BYTE*) headerBuffer; + const BYTE* const iend = istart + hbSize; + const BYTE* ip = istart; + int nbBits; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + if (hbSize < 4) { + /* This function only works when hbSize >= 4 */ + char buffer[4]; + memset(buffer, 0, sizeof(buffer)); + memcpy(buffer, headerBuffer, hbSize); + { size_t const countSize = FSE_readNCount(normalizedCounter, maxSVPtr, tableLogPtr, + buffer, sizeof(buffer)); + if (FSE_isError(countSize)) return countSize; + if (countSize > hbSize) return ERROR(corruption_detected); + return countSize; + } } + assert(hbSize >= 4); + + /* init */ + memset(normalizedCounter, 0, (*maxSVPtr+1) * sizeof(normalizedCounter[0])); /* all symbols not present in NCount have a frequency of 0 */ + bitStream = MEM_readLE32(ip); + 
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG;   /* extract tableLog */
+    if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
+    bitStream >>= 4;
+    bitCount = 4;
+    *tableLogPtr = nbBits;
+    remaining = (1<<nbBits)+1;
+    threshold = 1<<nbBits;
+    nbBits++;
+
+    while ((remaining>1) & (charnum<=*maxSVPtr)) {
+        if (previous0) {
+            unsigned n0 = charnum;
+            while ((bitStream & 0xFFFF) == 0xFFFF) {
+                n0 += 24;
+                if (ip < iend-5) {
+                    ip += 2;
+                    bitStream = MEM_readLE32(ip) >> bitCount;
+                } else {
+                    bitStream >>= 16;
+                    bitCount   += 16;
+            }   }
+            while ((bitStream & 3) == 3) {
+                n0 += 3;
+                bitStream >>= 2;
+                bitCount += 2;
+            }
+            n0 += bitStream & 3;
+            bitCount += 2;
+            if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
+            while (charnum < n0) normalizedCounter[charnum++] = 0;
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                assert((bitCount >> 3) <= 3); /* For first condition to work */
+                ip += bitCount>>3;
+                bitCount &= 7;
+                bitStream = MEM_readLE32(ip) >> bitCount;
+            } else {
+                bitStream >>= 2;
+        }   }
+        {   int const max = (2*threshold-1) - remaining;
+            int count;
+
+            if ((bitStream & (threshold-1)) < (U32)max) {
+                count = bitStream & (threshold-1);
+                bitCount += nbBits-1;
+            } else {
+                count = bitStream & (2*threshold-1);
+                if (count >= threshold) count -= max;
+                bitCount += nbBits;
+            }
+
+            count--;   /* extra accuracy */
+            remaining -= count < 0 ? -count : count;   /* -1 means +1 */
+            normalizedCounter[charnum++] = (short)count;
+            previous0 = !count;
+            while (remaining < threshold) {
+                nbBits--;
+                threshold >>= 1;
+            }
+
+            if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
+                ip += bitCount>>3;
+                bitCount &= 7;
+            } else {
+                bitCount -= (int)(8 * (iend - 4 - ip));
+                ip = iend - 4;
+            }
+            bitStream = MEM_readLE32(ip) >> (bitCount & 31);
+    }   }   /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
+    if (remaining != 1) return ERROR(corruption_detected);
+    if (bitCount > 32) return ERROR(corruption_detected);
+    *maxSVPtr = charnum-1;
+
+    ip += (bitCount+7)>>3;
+    return ip-istart;
+}
+
+
+/*! 
HUF_readStats() :
+    Read compact Huffman tree, saved by HUF_writeCTable().
+    `huffWeight` is destination buffer.
+    `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
+    @return : size read from `src` , or an error Code .
+    Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
+*/
+size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
+                     U32* nbSymbolsPtr, U32* tableLogPtr,
+                     const void* src, size_t srcSize)
+{
+    U32 weightTotal;
+    const BYTE* ip = (const BYTE*) src;
+    size_t iSize;
+    size_t oSize;
+
+    if (!srcSize) return ERROR(srcSize_wrong);
+    iSize = ip[0];
+    /* memset(huffWeight, 0, hwSize);   *//* is not necessary, even though some analyzer complain ... */
+
+    if (iSize >= 128) {  /* special header */
+        oSize = iSize - 127;
+        iSize = ((oSize+1)/2);
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        if (oSize >= hwSize) return ERROR(corruption_detected);
+        ip += 1;
+        {   U32 n;
+            for (n=0; n<oSize; n+=2) {
+                huffWeight[n]   = ip[n/2] >> 4;
+                huffWeight[n+1] = ip[n/2] & 15;
+    }   }   }
+    else  {   /* header compressed with FSE (normal case) */
+        FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)];  /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
+        if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
+        oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6);   /* max (hwSize-1) values decoded, as last one is implied */
+        if (FSE_isError(oSize)) return oSize;
+    }
+
+    /* collect weight stats */
+    memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
+    weightTotal = 0;
+    {   U32 n; for (n=0; n<oSize; n++) {
+            if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+            rankStats[huffWeight[n]]++;
+            weightTotal += (1 << huffWeight[n]) >> 1;
+    }   }
+    if (weightTotal == 0) return ERROR(corruption_detected);
+
+    /* get last non-null symbol weight (implied, total must be 2^n) */
+    {   U32 const tableLog = BIT_highbit32(weightTotal) + 1;
+        if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
+        *tableLogPtr = tableLog;
+        /* determine last weight 
*/ + { U32 const total = 1 << tableLog; + U32 const rest = total - weightTotal; + U32 const verif = 1 << BIT_highbit32(rest); + U32 const lastWeight = BIT_highbit32(rest) + 1; + if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankStats[lastWeight]++; + } } + + /* check tree construction validity */ + if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ + + /* results */ + *nbSymbolsPtr = (U32)(oSize+1); + return iSize+1; +} diff --git a/src/borg/algorithms/zstd/lib/common/error_private.c b/src/borg/algorithms/zstd/lib/common/error_private.c new file mode 100644 index 0000000000..cd437529c1 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/error_private.c @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/* The purpose of this file is to have a single list of error strings embedded in binary */ + +#include "error_private.h" + +const char* ERR_getErrorString(ERR_enum code) +{ +#ifdef ZSTD_STRIP_ERROR_STRINGS + (void)code; + return "Error strings stripped"; +#else + static const char* const notErrorCode = "Unspecified error code"; + switch( code ) + { + case PREFIX(no_error): return "No error detected"; + case PREFIX(GENERIC): return "Error (generic)"; + case PREFIX(prefix_unknown): return "Unknown frame descriptor"; + case PREFIX(version_unsupported): return "Version not supported"; + case PREFIX(frameParameter_unsupported): return "Unsupported frame parameter"; + case PREFIX(frameParameter_windowTooLarge): return "Frame requires too much memory for decoding"; + case PREFIX(corruption_detected): return "Corrupted block detected"; + case PREFIX(checksum_wrong): return "Restored data doesn't match checksum"; + case PREFIX(parameter_unsupported): return "Unsupported parameter"; + case PREFIX(parameter_outOfBound): return "Parameter is out of bound"; + case PREFIX(init_missing): return "Context should be init first"; + case PREFIX(memory_allocation): return "Allocation error : not enough memory"; + case PREFIX(workSpace_tooSmall): return "workSpace buffer is not large enough"; + case PREFIX(stage_wrong): return "Operation not authorized at current processing stage"; + case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported"; + case PREFIX(maxSymbolValue_tooLarge): return "Unsupported max Symbol Value : too large"; + case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small"; + case PREFIX(dictionary_corrupted): return "Dictionary is corrupted"; + case PREFIX(dictionary_wrong): return "Dictionary mismatch"; + case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples"; + case PREFIX(dstSize_tooSmall): return "Destination buffer is too small"; + case 
PREFIX(srcSize_wrong): return "Src size is incorrect"; + case PREFIX(dstBuffer_null): return "Operation on NULL destination buffer"; + /* following error codes are not stable and may be removed or changed in a future version */ + case PREFIX(frameIndex_tooLarge): return "Frame index is too large"; + case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking"; + case PREFIX(dstBuffer_wrong): return "Destination buffer is wrong"; + case PREFIX(maxCode): + default: return notErrorCode; + } +#endif +} diff --git a/src/borg/algorithms/zstd/lib/common/error_private.h b/src/borg/algorithms/zstd/lib/common/error_private.h new file mode 100644 index 0000000000..982cf8e9fe --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/error_private.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */
+
+/* Note : this module is expected to remain private, do not expose it */
+
+#ifndef ERROR_H_MODULE
+#define ERROR_H_MODULE
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>        /* size_t */
+#include "zstd_errors.h"  /* enum list */
+
+
+/* ****************************************
+*  Compiler-specific
+******************************************/
+#if defined(__GNUC__)
+#  define ERR_STATIC static __attribute__((unused))
+#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#  define ERR_STATIC static inline
+#elif defined(_MSC_VER)
+#  define ERR_STATIC static __inline
+#else
+#  define ERR_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
+#endif
+
+
+/*-****************************************
+*  Customization (error_public.h)
+******************************************/
+typedef ZSTD_ErrorCode ERR_enum;
+#define PREFIX(name) ZSTD_error_##name
+
+
+/*-****************************************
+*  Error codes handling
+******************************************/
+#undef ERROR   /* already defined on Visual Studio */
+#define ERROR(name) ZSTD_ERROR(name)
+#define ZSTD_ERROR(name) ((size_t)-PREFIX(name))
+
+ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
+
+ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
+
+/* check and forward error code */
+#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return e
+#define CHECK_F(f)   { CHECK_V_F(_var_err__, f); }
+
+
+/*-****************************************
+*  Error Strings
+******************************************/
+
+const char* ERR_getErrorString(ERR_enum code);   /* error_private.c */
+
+ERR_STATIC const char* ERR_getErrorName(size_t code)
+{
+    return 
ERR_getErrorString(ERR_getErrorCode(code));
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ERROR_H_MODULE */
diff --git a/src/borg/algorithms/zstd/lib/common/fse.h b/src/borg/algorithms/zstd/lib/common/fse.h
new file mode 100644
index 0000000000..ff54e70ea7
--- /dev/null
+++ b/src/borg/algorithms/zstd/lib/common/fse.h
@@ -0,0 +1,688 @@
+/* ******************************************************************
+ * FSE : Finite State Entropy codec
+ * Public Prototypes declaration
+ * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+****************************************************************** */
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef FSE_H
+#define FSE_H
+
+
+/*-*****************************************
+*  Dependencies
+******************************************/
+#include <stddef.h>    /* size_t, ptrdiff_t */
+
+
+/*-*****************************************
+*  FSE_PUBLIC_API : control library symbols visibility
+******************************************/
+#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
+#  define FSE_PUBLIC_API __attribute__ ((visibility ("default")))
+#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
+#  define FSE_PUBLIC_API __declspec(dllexport)
+#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
+#  define FSE_PUBLIC_API __declspec(dllimport) /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+#  define FSE_PUBLIC_API
+#endif
+
+/*------   Version   ------*/
+#define 
FSE_VERSION_MAJOR 0 +#define FSE_VERSION_MINOR 9 +#define FSE_VERSION_RELEASE 0 + +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE +#define FSE_QUOTE(str) #str +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) + +#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) +FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ + + +/*-**************************************** +* FSE simple functions +******************************************/ +/*! FSE_compress() : + Compress content of buffer 'src', of size 'srcSize', into destination buffer 'dst'. + 'dst' buffer must be already allocated. Compression runs faster is dstCapacity >= FSE_compressBound(srcSize). + @return : size of compressed data (<= dstCapacity). + Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! + if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression instead. + if FSE_isError(return), compression failed (more details using FSE_getErrorName()) +*/ +FSE_PUBLIC_API size_t FSE_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/*! FSE_decompress(): + Decompress FSE data from buffer 'cSrc', of size 'cSrcSize', + into already allocated destination buffer 'dst', of size 'dstCapacity'. + @return : size of regenerated data (<= maxDstSize), + or an error code, which can be tested using FSE_isError() . + + ** Important ** : FSE_decompress() does not decompress non-compressible nor RLE data !!! + Why ? : making this distinction requires a header. + Header management is intentionally delegated to the user layer, which can better manage special cases. 
+*/ +FSE_PUBLIC_API size_t FSE_decompress(void* dst, size_t dstCapacity, + const void* cSrc, size_t cSrcSize); + + +/*-***************************************** +* Tool functions +******************************************/ +FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ + +/* Error Management */ +FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ +FSE_PUBLIC_API const char* FSE_getErrorName(size_t code); /* provides error code string (useful for debugging) */ + + +/*-***************************************** +* FSE advanced functions +******************************************/ +/*! FSE_compress2() : + Same as FSE_compress(), but allows the selection of 'maxSymbolValue' and 'tableLog' + Both parameters can be defined as '0' to mean : use default value + @return : size of compressed data + Special values : if return == 0, srcData is not compressible => Nothing is stored within cSrc !!! + if return == 1, srcData is a single byte symbol * srcSize times. Use RLE compression. + if FSE_isError(return), it's an error code. +*/ +FSE_PUBLIC_API size_t FSE_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); + + +/*-***************************************** +* FSE detailed API +******************************************/ +/*! +FSE_compress() does the following: +1. count symbol occurrence from source[] into table count[] (see hist.h) +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) +3. save normalized counters to memory buffer using writeNCount() +4. build encoding table 'CTable' from normalized counters +5. encode the data stream using encoding table 'CTable' + +FSE_decompress() does the following: +1. read normalized counters with readNCount() +2. build decoding table 'DTable' from normalized counters +3. 
decode the data stream using decoding table 'DTable' + +The following API allows targeting specific sub-functions for advanced tasks. +For example, it's possible to compress several blocks using the same 'CTable', +or to save and provide normalized distribution using external method. +*/ + +/* *** COMPRESSION *** */ + +/*! FSE_optimalTableLog(): + dynamically downsize 'tableLog' when conditions are met. + It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. + @return : recommended tableLog (necessarily <= 'maxTableLog') */ +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_normalizeCount(): + normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) + 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). + @return : tableLog, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, + const unsigned* count, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_NCountWriteBound(): + Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. + Typically useful for allocation purpose. */ +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_writeNCount(): + Compactly save 'normalizedCounter' into 'buffer'. + @return : size of the compressed table, + or an errorCode, which can be tested using FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, + const short* normalizedCounter, + unsigned maxSymbolValue, unsigned tableLog); + +/*! Constructor and Destructor of FSE_CTable. + Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ +typedef unsigned FSE_CTable; /* don't allocate that. 
It's only meant to be more restrictive than void* */ +FSE_PUBLIC_API FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog); +FSE_PUBLIC_API void FSE_freeCTable (FSE_CTable* ct); + +/*! FSE_buildCTable(): + Builds `ct`, which must be already allocated, using FSE_createCTable(). + @return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_compress_usingCTable(): + Compress `src` using `ct` into `dst` which must be already allocated. + @return : size of compressed data (<= `dstCapacity`), + or 0 if compressed data could not fit into `dst`, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); + +/*! +Tutorial : +---------- +The first step is to count all symbols. FSE_count() does this job very fast. +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) +FSE_count() will return the number of occurrence of the most frequent symbol. +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). + +The next step is to normalize the frequencies. +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. +It also guarantees a minimum of 1 to any Symbol with frequency >= 1. +You can use 'tableLog'==0 to mean "use default tableLog value". 
+If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). + +The result of FSE_normalizeCount() will be saved into a table, +called 'normalizedCounter', which is a table of signed short. +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. +The return value is tableLog if everything proceeded as expected. +It is 0 if there is a single symbol within distribution. +If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). + +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). +'buffer' must be already allocated. +For guaranteed success, buffer size must be at least FSE_headerBound(). +The result of the function is the number of bytes written into 'buffer'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). + +'normalizedCounter' can then be used to create the compression table 'CTable'. +The space required by 'CTable' must be already allocated, using FSE_createCTable(). +You can then use FSE_buildCTable() to fill 'CTable'. +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). + +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. +If it returns '0', compressed data could not fit into 'dst'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). +*/ + + +/* *** DECOMPRESSION *** */ + +/*! 
FSE_readNCount(): + Read compactly saved 'normalizedCounter' from 'rBuffer'. + @return : size read from 'rBuffer', + or an errorCode, which can be tested using FSE_isError(). + maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, + unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, + const void* rBuffer, size_t rBuffSize); + +/*! Constructor and Destructor of FSE_DTable. + Note that its size depends on 'tableLog' */ +typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ +FSE_PUBLIC_API FSE_DTable* FSE_createDTable(unsigned tableLog); +FSE_PUBLIC_API void FSE_freeDTable(FSE_DTable* dt); + +/*! FSE_buildDTable(): + Builds 'dt', which must be already allocated, using FSE_createDTable(). + return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_decompress_usingDTable(): + Decompress compressed source `cSrc` of size `cSrcSize` using `dt` + into `dst` which must be already allocated. + @return : size of regenerated data (necessarily <= `dstCapacity`), + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); + +/*! +Tutorial : +---------- +(Note : these functions only decompress FSE-compressed blocks. + If block is uncompressed, use memcpy() instead + If block is a single repeated byte, use memset() instead ) + +The first step is to obtain the normalized frequencies of symbols. +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. 
+In practice, that means it's necessary to know 'maxSymbolValue' beforehand, +or size the table to handle worst case situations (typically 256). +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. +If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. +This is performed by the function FSE_buildDTable(). +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). +`cSrcSize` must be strictly correct, otherwise decompression will fail. +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). 
(ex: dst buffer too small) +*/ + +#endif /* FSE_H */ + +#if defined(FSE_STATIC_LINKING_ONLY) && !defined(FSE_H_FSE_STATIC_LINKING_ONLY) +#define FSE_H_FSE_STATIC_LINKING_ONLY + +/* *** Dependency *** */ +#include "bitstream.h" + + +/* ***************************************** +* Static allocation +*******************************************/ +/* FSE buffer bounds */ +#define FSE_NCOUNTBOUND 512 +#define FSE_BLOCKBOUND(size) (size + (size>>7) + 4 /* fse states */ + sizeof(size_t) /* bitContainer */) +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1< 12) ? (1 << (maxTableLog - 2)) : 1024) ) +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); + +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ + +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * `wkspSize` must be >= `(1<= BIT_DStream_completed + +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. +Checking if DStream has reached its end is performed by : + BIT_endOfDStream(&DStream); +Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. 
+ FSE_endOfDState(&DState); +*/ + + +/* ***************************************** +* FSE unsafe API +*******************************************/ +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ + + +/* ***************************************** +* Implementation of inlined functions +*******************************************/ +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; /* total 8 bytes */ + +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) +{ + const void* ptr = ct; + const U16* u16ptr = (const U16*) ptr; + const U32 tableLog = MEM_read16(ptr); + statePtr->value = (ptrdiff_t)1<stateTable = u16ptr+2; + statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1); + statePtr->stateLog = tableLog; +} + + +/*! FSE_initCState2() : +* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) +* uses the smallest state value possible, saving the cost of this symbol */ +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) +{ + FSE_initCState(statePtr, ct); + { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* stateTable = (const U16*)(statePtr->stateTable); + U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); + statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; + statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; + } +} + +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol) +{ + FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* const stateTable = (const U16*)(statePtr->stateTable); + U32 const nbBitsOut = (U32)((statePtr->value 
+ symbolTT.deltaNbBits) >> 16); + BIT_addBits(bitC, statePtr->value, nbBitsOut); + statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; +} + +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) +{ + BIT_addBits(bitC, statePtr->value, statePtr->stateLog); + BIT_flushBits(bitC); +} + + +/* FSE_getMaxNbBits() : + * Approximate maximum cost of a symbol, in bits. + * Fractional get rounded up (i.e : a symbol with a normalized frequency of 3 gives the same result as a frequency of 2) + * note 1 : assume symbolValue is valid (<= maxSymbolValue) + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ +MEM_STATIC U32 FSE_getMaxNbBits(const void* symbolTTPtr, U32 symbolValue) +{ + const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; + return (symbolTT[symbolValue].deltaNbBits + ((1<<16)-1)) >> 16; +} + +/* FSE_bitCost() : + * Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) + * note 1 : assume symbolValue is valid (<= maxSymbolValue) + * note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits */ +MEM_STATIC U32 FSE_bitCost(const void* symbolTTPtr, U32 tableLog, U32 symbolValue, U32 accuracyLog) +{ + const FSE_symbolCompressionTransform* symbolTT = (const FSE_symbolCompressionTransform*) symbolTTPtr; + U32 const minNbBits = symbolTT[symbolValue].deltaNbBits >> 16; + U32 const threshold = (minNbBits+1) << 16; + assert(tableLog < 16); + assert(accuracyLog < 31-tableLog); /* ensure enough room for renormalization double shift */ + { U32 const tableSize = 1 << tableLog; + U32 const deltaFromThreshold = threshold - (symbolTT[symbolValue].deltaNbBits + tableSize); + U32 const normalizedDeltaFromThreshold = (deltaFromThreshold << accuracyLog) >> tableLog; /* linear interpolation (very approximate) */ + U32 const bitMultiplier = 1 << accuracyLog; + 
assert(symbolTT[symbolValue].deltaNbBits + tableSize <= threshold); + assert(normalizedDeltaFromThreshold <= bitMultiplier); + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold; + } +} + + +/* ====== Decompression ====== */ + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + return DInfo.symbol; +} + +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.newState + lowBits; +} + +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +/*! 
FSE_decodeSymbolFast() : + unsafe, only works if no symbol has a probability > 50% */ +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + + +#ifndef FSE_COMMONDEFS_ONLY + +/* ************************************************************** +* Tuning parameters +****************************************************************/ +/*!MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#ifndef FSE_MAX_MEMORY_USAGE +# define FSE_MAX_MEMORY_USAGE 14 +#endif +#ifndef FSE_DEFAULT_MEMORY_USAGE +# define FSE_DEFAULT_MEMORY_USAGE 13 +#endif + +/*!FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#ifndef FSE_MAX_SYMBOL_VALUE +# define FSE_MAX_SYMBOL_VALUE 255 +#endif + +/* ************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION +#define FSE_DECODE_TYPE FSE_decode_t + + +#endif /* !FSE_COMMONDEFS_ONLY */ + + +/* *************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + +#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) + + +#endif /* FSE_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/borg/algorithms/zstd/lib/common/fse_decompress.c b/src/borg/algorithms/zstd/lib/common/fse_decompress.c new file mode 100644 index 0000000000..bcc2223ccc --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/fse_decompress.c @@ -0,0 +1,286 @@ +/* ****************************************************************** + * FSE : Finite State Entropy decoder + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+****************************************************************** */ + + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include "bitstream.h" +#include "compiler.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#include "error_private.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_isError ERR_isError +#define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ +FSE_DTable* FSE_createDTable (unsigned tableLog) +{ + if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; + return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) ); +} + +void FSE_freeDTable (FSE_DTable* dt) +{ + free(dt); +} + +size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + + U32 const maxSV1 = 
maxSymbolValue + 1; + U32 const tableSize = 1 << tableLog; + U32 highThreshold = tableSize-1; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ + { FSE_DTableHeader DTableH; + DTableH.tableLog = (U16)tableLog; + DTableH.fastMode = 1; + { S16 const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s= largeLimit) DTableH.fastMode=0; + symbolNext[s] = normalizedCounter[s]; + } } } + memcpy(dt, &DTableH, sizeof(DTableH)); + } + + /* Spread symbols */ + { U32 const tableMask = tableSize-1; + U32 const step = FSE_TABLESTEP(tableSize); + U32 s, position = 0; + for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } } + if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + } + + /* Build Decoding table */ + { U32 u; + for (u=0; utableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + void* dPtr = dt + 1; + FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSV1 = tableMask+1; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } + + op[2] = 
FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ + while (1) { + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state1); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state2); + break; + } + + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state2); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state1); + break; + } } + + return op-ostart; +} + + +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; + const U32 fastMode = DTableH->fastMode; + + /* select fast mode (static) */ + if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + + /* normal FSE decoding mode */ + size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(NCountLength)) return NCountLength; + /* if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); */ /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ + if (tableLog > maxLog) return ERROR(tableLog_tooLarge); + ip += 
NCountLength; + cSrcSize -= NCountLength; + + CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); + + return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ +} + + +typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + +size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize) +{ + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG); +} + + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/src/borg/algorithms/zstd/lib/common/huf.h b/src/borg/algorithms/zstd/lib/common/huf.h new file mode 100644 index 0000000000..ef432685da --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/huf.h @@ -0,0 +1,340 @@ +/* ****************************************************************** + * huff0 huffman codec, + * part of Finite State Entropy library + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +****************************************************************** */ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef HUF_H_298734234 +#define HUF_H_298734234 + +/* *** Dependencies *** */ +#include /* size_t */ + + +/* *** library symbols visibility *** */ +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual, + * HUF symbols remain "private" (internal symbols for library only). 
+ * Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */ +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4) +# define HUF_PUBLIC_API __attribute__ ((visibility ("default"))) +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) /* Visual expected */ +# define HUF_PUBLIC_API __declspec(dllexport) +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1) +# define HUF_PUBLIC_API __declspec(dllimport) /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */ +#else +# define HUF_PUBLIC_API +#endif + + +/* ========================== */ +/* *** simple functions *** */ +/* ========================== */ + +/** HUF_compress() : + * Compress content from buffer 'src', of size 'srcSize', into buffer 'dst'. + * 'dst' buffer must be already allocated. + * Compression runs faster if `dstCapacity` >= HUF_compressBound(srcSize). + * `srcSize` must be <= `HUF_BLOCKSIZE_MAX` == 128 KB. + * @return : size of compressed data (<= `dstCapacity`). + * Special values : if return == 0, srcData is not compressible => Nothing is stored within dst !!! + * if HUF_isError(return), compression failed (more details using HUF_getErrorName()) + */ +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize); + +/** HUF_decompress() : + * Decompress HUF data from buffer 'cSrc', of size 'cSrcSize', + * into already allocated buffer 'dst', of minimum size 'dstSize'. + * `originalSize` : **must** be the ***exact*** size of original (uncompressed) data. + * Note : in contrast with FSE, HUF_decompress can regenerate + * RLE (cSrcSize==1) and uncompressed (cSrcSize==dstSize) data, + * because it knows size to regenerate (originalSize). 
+ * @return : size of regenerated data (== originalSize), + * or an error code, which can be tested using HUF_isError() + */ +HUF_PUBLIC_API size_t HUF_decompress(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize); + + +/* *** Tool functions *** */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +HUF_PUBLIC_API size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ + +/* Error Management */ +HUF_PUBLIC_API unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */ + + +/* *** Advanced function *** */ + +/** HUF_compress2() : + * Same as HUF_compress(), but offers control over `maxSymbolValue` and `tableLog`. + * `maxSymbolValue` must be <= HUF_SYMBOLVALUE_MAX . + * `tableLog` must be `<= HUF_TABLELOG_MAX` . */ +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog); + +/** HUF_compress4X_wksp() : + * Same as HUF_compress2(), but uses externally allocated `workSpace`. + * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */ +#define HUF_WORKSPACE_SIZE ((6 << 10) + 256) +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize); + +#endif /* HUF_H_298734234 */ + +/* ****************************************************************** + * WARNING !! + * The following section contains advanced and experimental definitions + * which shall never be used in the context of a dynamic library, + * because they are not guaranteed to remain stable in the future. 
+ * Only consider them in association with static linking. + * *****************************************************************/ +#if defined(HUF_STATIC_LINKING_ONLY) && !defined(HUF_H_HUF_STATIC_LINKING_ONLY) +#define HUF_H_HUF_STATIC_LINKING_ONLY + +/* *** Dependencies *** */ +#include "mem.h" /* U32 */ + + +/* *** Constants *** */ +#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ +#define HUF_SYMBOLVALUE_MAX 255 + +#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) +# error "HUF_TABLELOG_MAX is too large !" +#endif + + +/* **************************************** +* Static allocation +******************************************/ +/* HUF buffer bounds */ +#define HUF_CTABLEBOUND 129 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */ +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* static allocation of HUF's Compression Table */ +#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ +#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ + U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \ + void* name##hv = &(name##hb); \ + HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ + +/* static allocation of HUF's DTable */ +typedef U32 HUF_DTable; +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) +#define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } +#define 
HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } + + +/* **************************************** +* Advanced decompression functions +******************************************/ +size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +#endif + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ +#endif + + +/* **************************************** + * HUF detailed API + * ****************************************/ + +/*! HUF_compress() does the following: + * 1. 
count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h") + * 2. (optional) refine tableLog using HUF_optimalTableLog() + * 3. build Huffman table from count using HUF_buildCTable() + * 4. save Huffman table to memory buffer using HUF_writeCTable() + * 5. encode the data stream using HUF_compress4X_usingCTable() + * + * The following API allows targeting specific sub-functions for advanced tasks. + * For example, it's possible to compress several blocks using the same 'CTable', + * or to save and regenerate 'CTable' using external methods. + */ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); +typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ +size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits); /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); + +typedef enum { + HUF_repeat_none, /**< Cannot use the previous table */ + HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ + HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */ + } HUF_repeat; +/** HUF_compress4X_repeat() : + * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. 
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. + * If preferRepeat then the old table will always be used if valid. */ +size_t HUF_compress4X_repeat(void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE. + */ +#define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1) +#define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned)) +size_t HUF_buildCTable_wksp (HUF_CElt* tree, + const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, + void* workSpace, size_t wkspSize); + +/*! HUF_readStats() : + * Read compact Huffman tree, saved by HUF_writeCTable(). + * `huffWeight` is destination buffer. + * @return : size read from `src` , or an error Code . + * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . 
*/ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, + U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize); + +/** HUF_readCTable() : + * Loading a CTable saved with HUF_writeCTable() */ +size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); + +/** HUF_getNbBits() : + * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX + * Note 1 : is not inlined, as HUF_CElt definition is private + * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */ +U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); + +/* + * HUF_decompress() does the following: + * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics + * 2. build Huffman table from save, using HUF_readDTableX?() + * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable() + */ + +/** HUF_selectDecoder() : + * Tells which decoder is likely to decode faster, + * based on a set of pre-computed metrics. + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . + * Assumption : 0 < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); + +/** + * The minimum workspace size for the `workSpace` used in + * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp(). + * + * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when + * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15. + * Buffer overflow errors may potentially occur if code modifications result in + * a required workspace size greater than that specified in the following + * macro. 
+ */ +#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10) +#define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32)) + +#ifndef HUF_FORCE_DECOMPRESS_X2 +size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); +#endif +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize); +#endif + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +#ifndef HUF_FORCE_DECOMPRESS_X2 +size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +#endif +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +#endif + + +/* ====================== */ +/* single stream variants */ +/* ====================== */ + +size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +/** HUF_compress1X_repeat() : + * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. + * If it uses hufTable it does not modify hufTable or repeat. + * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. 
+ * If preferRepeat then the old table will always be used if valid. */ +size_t HUF_compress1X_repeat(void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned tableLog, + void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + +size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */ +#endif + +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); +size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); +#ifndef HUF_FORCE_DECOMPRESS_X2 +size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */ +#endif +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */ +#endif + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ +#ifndef HUF_FORCE_DECOMPRESS_X2 +size_t HUF_decompress1X1_usingDTable(void* dst, size_t 
maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +#endif +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +#endif + +/* BMI2 variants. + * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0. + */ +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +#ifndef HUF_FORCE_DECOMPRESS_X2 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); +#endif +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2); +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2); + +#endif /* HUF_STATIC_LINKING_ONLY */ + +#if defined (__cplusplus) +} +#endif diff --git a/src/borg/algorithms/zstd/lib/common/mem.h b/src/borg/algorithms/zstd/lib/common/mem.h new file mode 100644 index 0000000000..89c8aea7d2 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/mem.h @@ -0,0 +1,453 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef MEM_H_MODULE +#define MEM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-**************************************** +* Dependencies +******************************************/ +#include <stddef.h> /* size_t, ptrdiff_t */ +#include <string.h> /* memcpy */ + + +/*-**************************************** +* Compiler specifics +******************************************/ +#if defined(_MSC_VER) /* Visual Studio */ +# include <stdlib.h> /* _byteswap_ulong */ +# include <intrin.h> /* _byteswap_* */ +#endif +#if defined(__GNUC__) +# define MEM_STATIC static __inline __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline +#else +# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + +#ifndef __has_builtin +# define __has_builtin(x) 0 /* compat. with non-clang compilers */ +#endif + +/* code only tested on 32 and 64 bits systems */ +#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } +MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } + +/* detects whether we are being compiled under msan */ +#if defined (__has_feature) +# if __has_feature(memory_sanitizer) +# define MEMORY_SANITIZER 1 +# endif +#endif + +#if defined (MEMORY_SANITIZER) +/* Not all platforms that support msan provide sanitizers/msan_interface.h. + * We therefore declare the functions we need ourselves, rather than trying to + * include the header file... */ + +#include <stdint.h> /* intptr_t */ + +/* Make memory region fully initialized (without changing its contents). */ +void __msan_unpoison(const volatile void *a, size_t size); + +/* Make memory region fully uninitialized (without changing its contents). + This is a legacy interface that does not update origin information. 
Use + __msan_allocated_memory() instead. */ +void __msan_poison(const volatile void *a, size_t size); + +/* Returns the offset of the first (at least partially) poisoned byte in the + memory range, or -1 if the whole range is good. */ +intptr_t __msan_test_shadow(const volatile void *x, size_t size); +#endif + +/* detects whether we are being compiled under asan */ +#if defined (__has_feature) +# if __has_feature(address_sanitizer) +# define ADDRESS_SANITIZER 1 +# endif +#elif defined(__SANITIZE_ADDRESS__) +# define ADDRESS_SANITIZER 1 +#endif + +#if defined (ADDRESS_SANITIZER) +/* Not all platforms that support asan provide sanitizers/asan_interface.h. + * We therefore declare the functions we need ourselves, rather than trying to + * include the header file... */ + +/** + * Marks a memory region ([addr, addr+size)) as unaddressable. + * + * This memory must be previously allocated by your program. Instrumented + * code is forbidden from accessing addresses in this region until it is + * unpoisoned. This function is not guaranteed to poison the entire region - + * it could poison only a subregion of [addr, addr+size) due to ASan + * alignment restrictions. + * + * \note This function is not thread-safe because no two threads can poison or + * unpoison memory in the same memory region simultaneously. + * + * \param addr Start of memory region. + * \param size Size of memory region. */ +void __asan_poison_memory_region(void const volatile *addr, size_t size); + +/** + * Marks a memory region ([addr, addr+size)) as addressable. + * + * This memory must be previously allocated by your program. Accessing + * addresses in this region is allowed until this region is poisoned again. + * This function could unpoison a super-region of [addr, addr+size) due + * to ASan alignment restrictions. + * + * \note This function is not thread-safe because no two threads can + * poison or unpoison memory in the same memory region simultaneously. 
+ * + * \param addr Start of memory region. + * \param size Size of memory region. */ +void __asan_unpoison_memory_region(void const volatile *addr, size_t size); +#endif + + +/*-************************************************************** +* Basic Types +*****************************************************************/ +#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef int16_t S16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef int64_t S64; +#else +# include <limits.h> +#if CHAR_BIT != 8 +# error "this implementation requires char to be exactly 8-bit type" +#endif + typedef unsigned char BYTE; +#if USHRT_MAX != 65535 +# error "this implementation requires short to be exactly 16-bit type" +#endif + typedef unsigned short U16; + typedef signed short S16; +#if UINT_MAX != 4294967295 +# error "this implementation requires int to be exactly 32-bit type" +#endif + typedef unsigned int U32; + typedef signed int S32; +/* note : there are no limits defined for long long type in C90. + * limits exist in C99, however, in such case, <stdint.h> is preferred */ + typedef unsigned long long U64; + typedef signed long long S64; +#endif + + +/*-************************************************************** +* Memory I/O +*****************************************************************/ +/* MEM_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable). 
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets depending on alignment. + * In some circumstances, it's the only known way to get the most performance (i.e. GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. + * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define MEM_FORCE_MEMORY_ACCESS 2 +# elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__) +# define MEM_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } +MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } + +MEM_STATIC unsigned MEM_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) + +/* violates C standard, by lying on structure alignment. 
+Only use if no other choice to achieve best performance on target platform */ +MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } +MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } +MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } +MEM_STATIC size_t MEM_readST(const void* memPtr) { return *(const size_t*) memPtr; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; } +MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; } + +#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +#if defined(_MSC_VER) || (defined(__INTEL_COMPILER) && defined(WIN32)) + __pragma( pack(push, 1) ) + typedef struct { U16 v; } unalign16; + typedef struct { U32 v; } unalign32; + typedef struct { U64 v; } unalign64; + typedef struct { size_t v; } unalignArch; + __pragma( pack(pop) ) +#else + typedef struct { U16 v; } __attribute__((packed)) unalign16; + typedef struct { U32 v; } __attribute__((packed)) unalign32; + typedef struct { U64 v; } __attribute__((packed)) unalign64; + typedef struct { size_t v; } __attribute__((packed)) unalignArch; +#endif + +MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign16*)ptr)->v; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign32*)ptr)->v; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign64*)ptr)->v; } +MEM_STATIC size_t MEM_readST(const void* ptr) { return ((const unalignArch*)ptr)->v; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign16*)memPtr)->v = value; } +MEM_STATIC void MEM_write32(void* memPtr, U32 value) { ((unalign32*)memPtr)->v = value; } +MEM_STATIC void MEM_write64(void* 
memPtr, U64 value) { ((unalign64*)memPtr)->v = value; } + +#else + +/* default method, safe and standard. + can sometimes prove slower */ + +MEM_STATIC U16 MEM_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U32 MEM_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U64 MEM_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC size_t MEM_readST(const void* memPtr) +{ + size_t val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +MEM_STATIC void MEM_write32(void* memPtr, U32 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +MEM_STATIC void MEM_write64(void* memPtr, U64 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* MEM_FORCE_MEMORY_ACCESS */ + +MEM_STATIC U32 MEM_swap32(U32 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_ulong(in); +#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ + || (defined(__clang__) && __has_builtin(__builtin_bswap32)) + return __builtin_bswap32(in); +#else + return ((in << 24) & 0xff000000 ) | + ((in << 8) & 0x00ff0000 ) | + ((in >> 8) & 0x0000ff00 ) | + ((in >> 24) & 0x000000ff ); +#endif +} + +MEM_STATIC U64 MEM_swap64(U64 in) +{ +#if defined(_MSC_VER) /* Visual Studio */ + return _byteswap_uint64(in); +#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \ + || (defined(__clang__) && __has_builtin(__builtin_bswap64)) + return __builtin_bswap64(in); +#else + return ((in << 56) & 0xff00000000000000ULL) | + ((in << 40) & 0x00ff000000000000ULL) | + ((in << 24) & 0x0000ff0000000000ULL) | + ((in << 8) & 0x000000ff00000000ULL) | + ((in >> 8) & 0x00000000ff000000ULL) | + ((in >> 24) & 0x0000000000ff0000ULL) | + ((in >> 40) & 0x000000000000ff00ULL) | + ((in >> 56) & 0x00000000000000ffULL); 
+#endif +} + +MEM_STATIC size_t MEM_swapST(size_t in) +{ + if (MEM_32bits()) + return (size_t)MEM_swap32((U32)in); + else + return (size_t)MEM_swap64((U64)in); +} + +/*=== Little endian r/w ===*/ + +MEM_STATIC U16 MEM_readLE16(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read16(memPtr); + else { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) +{ + if (MEM_isLittleEndian()) { + MEM_write16(memPtr, val); + } else { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE)val; + p[1] = (BYTE)(val>>8); + } +} + +MEM_STATIC U32 MEM_readLE24(const void* memPtr) +{ + return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); +} + +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) +{ + MEM_writeLE16(memPtr, (U16)val); + ((BYTE*)memPtr)[2] = (BYTE)(val>>16); +} + +MEM_STATIC U32 MEM_readLE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read32(memPtr); + else + return MEM_swap32(MEM_read32(memPtr)); +} + +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) +{ + if (MEM_isLittleEndian()) + MEM_write32(memPtr, val32); + else + MEM_write32(memPtr, MEM_swap32(val32)); +} + +MEM_STATIC U64 MEM_readLE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read64(memPtr); + else + return MEM_swap64(MEM_read64(memPtr)); +} + +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) +{ + if (MEM_isLittleEndian()) + MEM_write64(memPtr, val64); + else + MEM_write64(memPtr, MEM_swap64(val64)); +} + +MEM_STATIC size_t MEM_readLEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeLE32(memPtr, (U32)val); + else + MEM_writeLE64(memPtr, (U64)val); +} + +/*=== Big endian r/w ===*/ + +MEM_STATIC U32 MEM_readBE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return 
MEM_swap32(MEM_read32(memPtr)); + else + return MEM_read32(memPtr); +} + +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) +{ + if (MEM_isLittleEndian()) + MEM_write32(memPtr, MEM_swap32(val32)); + else + MEM_write32(memPtr, val32); +} + +MEM_STATIC U64 MEM_readBE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_swap64(MEM_read64(memPtr)); + else + return MEM_read64(memPtr); +} + +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) +{ + if (MEM_isLittleEndian()) + MEM_write64(memPtr, MEM_swap64(val64)); + else + MEM_write64(memPtr, val64); +} + +MEM_STATIC size_t MEM_readBEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readBE32(memPtr); + else + return (size_t)MEM_readBE64(memPtr); +} + +MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeBE32(memPtr, (U32)val); + else + MEM_writeBE64(memPtr, (U64)val); +} + + +#if defined (__cplusplus) +} +#endif + +#endif /* MEM_H_MODULE */ diff --git a/src/borg/algorithms/zstd/lib/common/pool.c b/src/borg/algorithms/zstd/lib/common/pool.c new file mode 100644 index 0000000000..aa4b4de0d3 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/pool.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + +/* ====== Dependencies ======= */ +#include <stddef.h> /* size_t */ +#include "debug.h" /* assert */ +#include "zstd_internal.h" /* ZSTD_malloc, ZSTD_free */ +#include "pool.h" + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +#ifdef ZSTD_MULTITHREAD + +#include "threading.h" /* pthread adaptation */ + +/* A job is a function and an opaque argument */ +typedef struct POOL_job_s { + POOL_function function; + void *opaque; +} POOL_job; + +struct POOL_ctx_s { + ZSTD_customMem customMem; + /* Keep track of the threads */ + ZSTD_pthread_t* threads; + size_t threadCapacity; + size_t threadLimit; + + /* The queue is a circular buffer */ + POOL_job *queue; + size_t queueHead; + size_t queueTail; + size_t queueSize; + + /* The number of threads working on jobs */ + size_t numThreadsBusy; + /* Indicates if the queue is empty */ + int queueEmpty; + + /* The mutex protects the queue */ + ZSTD_pthread_mutex_t queueMutex; + /* Condition variable for pushers to wait on when the queue is full */ + ZSTD_pthread_cond_t queuePushCond; + /* Condition variables for poppers to wait on when the queue is empty */ + ZSTD_pthread_cond_t queuePopCond; + /* Indicates if the queue is shutting down */ + int shutdown; +}; + +/* POOL_thread() : + * Work thread for the thread pool. + * Waits for jobs and executes them. + * @returns : NULL on failure else non-null. 
+ */ +static void* POOL_thread(void* opaque) { + POOL_ctx* const ctx = (POOL_ctx*)opaque; + if (!ctx) { return NULL; } + for (;;) { + /* Lock the mutex and wait for a non-empty queue or until shutdown */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + + while ( ctx->queueEmpty + || (ctx->numThreadsBusy >= ctx->threadLimit) ) { + if (ctx->shutdown) { + /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit), + * a few threads will be shutdown while !queueEmpty, + * but enough threads will remain active to finish the queue */ + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return opaque; + } + ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex); + } + /* Pop a job off the queue */ + { POOL_job const job = ctx->queue[ctx->queueHead]; + ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; + ctx->numThreadsBusy++; + ctx->queueEmpty = ctx->queueHead == ctx->queueTail; + /* Unlock the mutex, signal a pusher, and run the job */ + ZSTD_pthread_cond_signal(&ctx->queuePushCond); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + + job.function(job.opaque); + + /* If the intended queue size was 0, signal after finishing job */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + ctx->numThreadsBusy--; + if (ctx->queueSize == 1) { + ZSTD_pthread_cond_signal(&ctx->queuePushCond); + } + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + } + } /* for (;;) */ + assert(0); /* Unreachable */ +} + +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, + ZSTD_customMem customMem) { + POOL_ctx* ctx; + /* Check parameters */ + if (!numThreads) { return NULL; } + /* Allocate the context and zero initialize */ + ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem); + if (!ctx) { return NULL; } + /* Initialize the job queue. + * It needs one extra space since one space is wasted to differentiate + * empty and full queues. 
+ */ + ctx->queueSize = queueSize + 1; + ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem); + ctx->queueHead = 0; + ctx->queueTail = 0; + ctx->numThreadsBusy = 0; + ctx->queueEmpty = 1; + { + int error = 0; + error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL); + error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL); + error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL); + if (error) { POOL_free(ctx); return NULL; } + } + ctx->shutdown = 0; + /* Allocate space for the thread handles */ + ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem); + ctx->threadCapacity = 0; + ctx->customMem = customMem; + /* Check for errors */ + if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; } + /* Initialize the threads */ + { size_t i; + for (i = 0; i < numThreads; ++i) { + if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) { + ctx->threadCapacity = i; + POOL_free(ctx); + return NULL; + } } + ctx->threadCapacity = numThreads; + ctx->threadLimit = numThreads; + } + return ctx; +} + +/*! POOL_join() : + Shutdown the queue, wake any sleeping threads, and join all of the threads. 
+*/ +static void POOL_join(POOL_ctx* ctx) { + /* Shut down the queue */ + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + ctx->shutdown = 1; + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + /* Wake up sleeping threads */ + ZSTD_pthread_cond_broadcast(&ctx->queuePushCond); + ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); + /* Join all of the threads */ + { size_t i; + for (i = 0; i < ctx->threadCapacity; ++i) { + ZSTD_pthread_join(ctx->threads[i], NULL); /* note : could fail */ + } } +} + +void POOL_free(POOL_ctx *ctx) { + if (!ctx) { return; } + POOL_join(ctx); + ZSTD_pthread_mutex_destroy(&ctx->queueMutex); + ZSTD_pthread_cond_destroy(&ctx->queuePushCond); + ZSTD_pthread_cond_destroy(&ctx->queuePopCond); + ZSTD_free(ctx->queue, ctx->customMem); + ZSTD_free(ctx->threads, ctx->customMem); + ZSTD_free(ctx, ctx->customMem); +} + + + +size_t POOL_sizeof(POOL_ctx *ctx) { + if (ctx==NULL) return 0; /* supports sizeof NULL */ + return sizeof(*ctx) + + ctx->queueSize * sizeof(POOL_job) + + ctx->threadCapacity * sizeof(ZSTD_pthread_t); +} + + +/* @return : 0 on success, 1 on error */ +static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads) +{ + if (numThreads <= ctx->threadCapacity) { + if (!numThreads) return 1; + ctx->threadLimit = numThreads; + return 0; + } + /* numThreads > threadCapacity */ + { ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem); + if (!threadPool) return 1; + /* replace existing thread pool */ + memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool)); + ZSTD_free(ctx->threads, ctx->customMem); + ctx->threads = threadPool; + /* Initialize additional threads */ + { size_t threadId; + for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) { + if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) { + ctx->threadCapacity = threadId; + return 1; + } } + } } + /* successfully expanded */ + ctx->threadCapacity = numThreads; + 
ctx->threadLimit = numThreads; + return 0; +} + +/* @return : 0 on success, 1 on error */ +int POOL_resize(POOL_ctx* ctx, size_t numThreads) +{ + int result; + if (ctx==NULL) return 1; + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + result = POOL_resize_internal(ctx, numThreads); + ZSTD_pthread_cond_broadcast(&ctx->queuePopCond); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return result; +} + +/** + * Returns 1 if the queue is full and 0 otherwise. + * + * When queueSize is 1 (pool was created with an intended queueSize of 0), + * then a queue is empty if there is a thread free _and_ no job is waiting. + */ +static int isQueueFull(POOL_ctx const* ctx) { + if (ctx->queueSize > 1) { + return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize); + } else { + return (ctx->numThreadsBusy == ctx->threadLimit) || + !ctx->queueEmpty; + } +} + + +static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) +{ + POOL_job const job = {function, opaque}; + assert(ctx != NULL); + if (ctx->shutdown) return; + + ctx->queueEmpty = 0; + ctx->queue[ctx->queueTail] = job; + ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize; + ZSTD_pthread_cond_signal(&ctx->queuePopCond); +} + +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) +{ + assert(ctx != NULL); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + /* Wait until there is space in the queue for the new job */ + while (isQueueFull(ctx) && (!ctx->shutdown)) { + ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex); + } + POOL_add_internal(ctx, function, opaque); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); +} + + +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) +{ + assert(ctx != NULL); + ZSTD_pthread_mutex_lock(&ctx->queueMutex); + if (isQueueFull(ctx)) { + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return 0; + } + POOL_add_internal(ctx, function, opaque); + ZSTD_pthread_mutex_unlock(&ctx->queueMutex); + return 1; +} + + +#else /* ZSTD_MULTITHREAD 
not defined */ + +/* ========================== */ +/* No multi-threading support */ +/* ========================== */ + + +/* We don't need any data, but if it is empty, malloc() might return NULL. */ +struct POOL_ctx_s { + int dummy; +}; +static POOL_ctx g_ctx; + +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { + return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); +} + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { + (void)numThreads; + (void)queueSize; + (void)customMem; + return &g_ctx; +} + +void POOL_free(POOL_ctx* ctx) { + assert(!ctx || ctx == &g_ctx); + (void)ctx; +} + +int POOL_resize(POOL_ctx* ctx, size_t numThreads) { + (void)ctx; (void)numThreads; + return 0; +} + +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) { + (void)ctx; + function(opaque); +} + +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { + (void)ctx; + function(opaque); + return 1; +} + +size_t POOL_sizeof(POOL_ctx* ctx) { + if (ctx==NULL) return 0; /* supports sizeof NULL */ + assert(ctx == &g_ctx); + return sizeof(*ctx); +} + +#endif /* ZSTD_MULTITHREAD */ diff --git a/src/borg/algorithms/zstd/lib/common/pool.h b/src/borg/algorithms/zstd/lib/common/pool.h new file mode 100644 index 0000000000..259bafc975 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/pool.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef POOL_H +#define POOL_H + +#if defined (__cplusplus) +extern "C" { +#endif + + +#include /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_customMem */ +#include "../zstd.h" + +typedef struct POOL_ctx_s POOL_ctx; + +/*! POOL_create() : + * Create a thread pool with at most `numThreads` threads. + * `numThreads` must be at least 1. + * The maximum number of queued jobs before blocking is `queueSize`. + * @return : POOL_ctx pointer on success, else NULL. +*/ +POOL_ctx* POOL_create(size_t numThreads, size_t queueSize); + +POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, + ZSTD_customMem customMem); + +/*! POOL_free() : + * Free a thread pool returned by POOL_create(). + */ +void POOL_free(POOL_ctx* ctx); + +/*! POOL_resize() : + * Expands or shrinks pool's number of threads. + * This is more efficient than releasing + creating a new context, + * since it tries to preserve and re-use existing threads. + * `numThreads` must be at least 1. + * @return : 0 when resize was successful, + * !0 (typically 1) if there is an error. + * note : only numThreads can be resized, queueSize remains unchanged. + */ +int POOL_resize(POOL_ctx* ctx, size_t numThreads); + +/*! POOL_sizeof() : + * @return threadpool memory usage + * note : compatible with NULL (returns 0 in this case) + */ +size_t POOL_sizeof(POOL_ctx* ctx); + +/*! POOL_function : + * The function type that can be added to a thread pool. + */ +typedef void (*POOL_function)(void*); + +/*! POOL_add() : + * Add the job `function(opaque)` to the thread pool. `ctx` must be valid. + * Possibly blocks until there is room in the queue. + * Note : The function may be executed asynchronously, + * therefore, `opaque` must live until function has been completed. + */ +void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); + + +/*! POOL_tryAdd() : + * Add the job `function(opaque)` to thread pool _if_ a worker is available. + * Returns immediately even if not (does not block). 
+ * @return : 1 if successful, 0 if not. + */ +int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque); + + +#if defined (__cplusplus) +} +#endif + +#endif diff --git a/src/borg/algorithms/zstd/lib/common/threading.c b/src/borg/algorithms/zstd/lib/common/threading.c new file mode 100644 index 0000000000..e2edb313eb --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/threading.c @@ -0,0 +1,121 @@ +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/** + * This file will hold wrapper for systems, which do not support pthreads + */ + +#include "threading.h" + +/* create fake symbol to avoid empty translation unit warning */ +int g_ZSTD_threading_useless_symbol; + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ + + +/* === Dependencies === */ +#include +#include + + +/* === Implementation === */ + +static unsigned __stdcall worker(void *arg) +{ + ZSTD_pthread_t* const thread = (ZSTD_pthread_t*) arg; + thread->arg = thread->start_routine(thread->arg); + return 0; +} + +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg) +{ + (void)unused; + thread->arg = arg; + thread->start_routine = start_routine; + thread->handle = (HANDLE) _beginthreadex(NULL, 0, worker, thread, 0, NULL); + + if (!thread->handle) + return errno; + else + return 0; +} + +int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr) +{ + DWORD result; + + if (!thread.handle) return 0; 
+ + result = WaitForSingleObject(thread.handle, INFINITE); + switch (result) { + case WAIT_OBJECT_0: + if (value_ptr) *value_ptr = thread.arg; + return 0; + case WAIT_ABANDONED: + return EINVAL; + default: + return GetLastError(); + } +} + +#endif /* ZSTD_MULTITHREAD */ + +#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32) + +#include + +int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr) +{ + *mutex = (pthread_mutex_t*)malloc(sizeof(pthread_mutex_t)); + if (!*mutex) + return 1; + return pthread_mutex_init(*mutex, attr); +} + +int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex) +{ + if (!*mutex) + return 0; + { + int const ret = pthread_mutex_destroy(*mutex); + free(*mutex); + return ret; + } +} + +int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr) +{ + *cond = (pthread_cond_t*)malloc(sizeof(pthread_cond_t)); + if (!*cond) + return 1; + return pthread_cond_init(*cond, attr); +} + +int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond) +{ + if (!*cond) + return 0; + { + int const ret = pthread_cond_destroy(*cond); + free(*cond); + return ret; + } +} + +#endif diff --git a/src/borg/algorithms/zstd/lib/common/threading.h b/src/borg/algorithms/zstd/lib/common/threading.h new file mode 100644 index 0000000000..fd0060d5aa --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/threading.h @@ -0,0 +1,155 @@ +/** + * Copyright (c) 2016 Tino Reichardt + * All rights reserved. + * + * You can contact the author at: + * - zstdmt source repository: https://github.com/mcmilk/zstdmt + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef THREADING_H_938743 +#define THREADING_H_938743 + +#include "debug.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +#if defined(ZSTD_MULTITHREAD) && defined(_WIN32) + +/** + * Windows minimalist Pthread Wrapper, based on : + * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html + */ +#ifdef WINVER +# undef WINVER +#endif +#define WINVER 0x0600 + +#ifdef _WIN32_WINNT +# undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0600 + +#ifndef WIN32_LEAN_AND_MEAN +# define WIN32_LEAN_AND_MEAN +#endif + +#undef ERROR /* reported already defined on VS 2015 (Rich Geldreich) */ +#include +#undef ERROR +#define ERROR(name) ZSTD_ERROR(name) + + +/* mutex */ +#define ZSTD_pthread_mutex_t CRITICAL_SECTION +#define ZSTD_pthread_mutex_init(a, b) ((void)(b), InitializeCriticalSection((a)), 0) +#define ZSTD_pthread_mutex_destroy(a) DeleteCriticalSection((a)) +#define ZSTD_pthread_mutex_lock(a) EnterCriticalSection((a)) +#define ZSTD_pthread_mutex_unlock(a) LeaveCriticalSection((a)) + +/* condition variable */ +#define ZSTD_pthread_cond_t CONDITION_VARIABLE +#define ZSTD_pthread_cond_init(a, b) ((void)(b), InitializeConditionVariable((a)), 0) +#define ZSTD_pthread_cond_destroy(a) ((void)(a)) +#define ZSTD_pthread_cond_wait(a, b) SleepConditionVariableCS((a), (b), INFINITE) +#define ZSTD_pthread_cond_signal(a) WakeConditionVariable((a)) +#define ZSTD_pthread_cond_broadcast(a) WakeAllConditionVariable((a)) + +/* ZSTD_pthread_create() and ZSTD_pthread_join() */ +typedef struct { + HANDLE handle; + void* (*start_routine)(void*); + void* arg; +} ZSTD_pthread_t; + +int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused, + void* (*start_routine) (void*), void* arg); + +int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr); + +/** + * add here more wrappers as required + */ + + +#elif defined(ZSTD_MULTITHREAD) /* posix assumed ; need a better detection method */ +/* === POSIX Systems === */ +# include + +#if DEBUGLEVEL < 1 + +#define 
ZSTD_pthread_mutex_t pthread_mutex_t +#define ZSTD_pthread_mutex_init(a, b) pthread_mutex_init((a), (b)) +#define ZSTD_pthread_mutex_destroy(a) pthread_mutex_destroy((a)) +#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock((a)) +#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock((a)) + +#define ZSTD_pthread_cond_t pthread_cond_t +#define ZSTD_pthread_cond_init(a, b) pthread_cond_init((a), (b)) +#define ZSTD_pthread_cond_destroy(a) pthread_cond_destroy((a)) +#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait((a), (b)) +#define ZSTD_pthread_cond_signal(a) pthread_cond_signal((a)) +#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast((a)) + +#define ZSTD_pthread_t pthread_t +#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) +#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) + +#else /* DEBUGLEVEL >= 1 */ + +/* Debug implementation of threading. + * In this implementation we use pointers for mutexes and condition variables. + * This way, if we forget to init/destroy them the program will crash or ASAN + * will report leaks. 
+ */ + +#define ZSTD_pthread_mutex_t pthread_mutex_t* +int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr); +int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex); +#define ZSTD_pthread_mutex_lock(a) pthread_mutex_lock(*(a)) +#define ZSTD_pthread_mutex_unlock(a) pthread_mutex_unlock(*(a)) + +#define ZSTD_pthread_cond_t pthread_cond_t* +int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr); +int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond); +#define ZSTD_pthread_cond_wait(a, b) pthread_cond_wait(*(a), *(b)) +#define ZSTD_pthread_cond_signal(a) pthread_cond_signal(*(a)) +#define ZSTD_pthread_cond_broadcast(a) pthread_cond_broadcast(*(a)) + +#define ZSTD_pthread_t pthread_t +#define ZSTD_pthread_create(a, b, c, d) pthread_create((a), (b), (c), (d)) +#define ZSTD_pthread_join(a, b) pthread_join((a),(b)) + +#endif + +#else /* ZSTD_MULTITHREAD not defined */ +/* No multithreading support */ + +typedef int ZSTD_pthread_mutex_t; +#define ZSTD_pthread_mutex_init(a, b) ((void)(a), (void)(b), 0) +#define ZSTD_pthread_mutex_destroy(a) ((void)(a)) +#define ZSTD_pthread_mutex_lock(a) ((void)(a)) +#define ZSTD_pthread_mutex_unlock(a) ((void)(a)) + +typedef int ZSTD_pthread_cond_t; +#define ZSTD_pthread_cond_init(a, b) ((void)(a), (void)(b), 0) +#define ZSTD_pthread_cond_destroy(a) ((void)(a)) +#define ZSTD_pthread_cond_wait(a, b) ((void)(a), (void)(b)) +#define ZSTD_pthread_cond_signal(a) ((void)(a)) +#define ZSTD_pthread_cond_broadcast(a) ((void)(a)) + +/* do not use ZSTD_pthread_t */ + +#endif /* ZSTD_MULTITHREAD */ + +#if defined (__cplusplus) +} +#endif + +#endif /* THREADING_H_938743 */ diff --git a/src/borg/algorithms/zstd/lib/common/xxhash.c b/src/borg/algorithms/zstd/lib/common/xxhash.c new file mode 100644 index 0000000000..597de18fc8 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/xxhash.c @@ -0,0 +1,864 @@ +/* + * xxHash - Fast Hash algorithm + * Copyright (c) 2012-2020, Yann 
Collet, Facebook, Inc. + * + * You can contact the author at : + * - xxHash homepage: http://www.xxhash.com + * - xxHash source repository : https://github.com/Cyan4973/xxHash + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_FORCE_MEMORY_ACCESS : + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. + * It can generate buggy code on targets which do not support unaligned memory accesses. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://stackoverflow.com/a/32095106/646947 for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define XXH_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \ + defined(__ICCARM__) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. + * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. + * By default, this option is disabled. To enable it, uncomment below define : + */ +/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independence be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ +# define XXH_FORCE_NATIVE_FORMAT 0 +#endif + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. 
+ * The check costs one initial branch per hash; set to 0 when the input data + * is guaranteed to be aligned. + */ +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/* Modify the local functions below should you wish to use some other memory routines */ +/* for malloc(), free() */ +#include +#include /* size_t */ +static void* XXH_malloc(size_t s) { return malloc(s); } +static void XXH_free (void* p) { free(p); } +/* for memcpy() */ +#include +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +#endif +#include "xxhash.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# define INLINE_KEYWORD inline +#else +# define INLINE_KEYWORD +#endif + +#if defined(__GNUC__) || defined(__ICCARM__) +# define FORCE_INLINE_ATTR __attribute__((always_inline)) +#elif defined(_MSC_VER) +# define FORCE_INLINE_ATTR __forceinline +#else +# define FORCE_INLINE_ATTR +#endif + +#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR + + +#ifdef _MSC_VER +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/* ************************************* +* Basic Types +***************************************/ +#ifndef MEM_MODULE +# define MEM_MODULE +# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include + 
typedef uint8_t BYTE; + typedef uint16_t U16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; +# else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */ +# endif +#endif + + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } +static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; + +static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +#else + +/* portable and safe solution. Generally efficient. 
+ * see : http://stackoverflow.com/a/32095106/646947 + */ + +static U32 XXH_read32(const void* memPtr) +{ + U32 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +static U64 XXH_read64(const void* memPtr) +{ + U64 val; + memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ +#if defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +#if defined(__ICCARM__) +# include +# define XXH_rotl32(x,r) __ROR(x,(32 - r)) +#else +# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +#endif +# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) +#endif + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +# define XXH_swap64 _byteswap_uint64 +#elif GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +# define XXH_swap64 __builtin_bswap64 +#else +static U32 XXH_swap32 (U32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +static U64 XXH_swap64 (U64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef 
XXH_CPU_LITTLE_ENDIAN + static const int g_one = 1; +# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); + else + return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); +} + +FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} + +FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + if (align==XXH_unaligned) + return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); + else + return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); +} + +FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************* +* Constants +***************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ************************** +* Utils +****************************/ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + + +/* *************************** +* Simple Hash Functions +*****************************/ + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; +#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) 
{ + const BYTE* const limit = bEnd - 16; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p<=limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32) len; + + while (p+4<=bEnd) { + h32 += XXH_get32bits(p) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + p+=4; + } + + while (p> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_CREATESTATE_STATIC(state); + XXH32_reset(state, seed); + XXH32_update(state, input, len); + return XXH32_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = 
XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + U64 h64; +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_CREATESTATE_STATIC(state); + XXH64_reset(state, seed); + XXH64_update(state, input, len); + return XXH64_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if 
(XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +/* ************************************************** +* Advanced Hash Functions +****************************************************/ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + + +/*** Hash feed ***/ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for 
future removal */ + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* 
state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem32; + const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p+4<=bEnd) { + h32 += XXH_readLE32(p, endian) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4; + p+=4; + } + + while (p> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + + +/* **** XXH64 **** */ + +FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + if (input != NULL) { + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + } + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer 
is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem64; + const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); 
+ h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (U64) state->total_len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p<bEnd) { + h64 ^= (*p) * PRIME64_5; + h64 = XXH_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/* ************************** +* Canonical representation +****************************/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
+*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + memcpy(dst, &hash, sizeof(*dst)); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} diff --git a/src/borg/algorithms/zstd/lib/common/xxhash.h b/src/borg/algorithms/zstd/lib/common/xxhash.h new file mode 100644 index 0000000000..4207eba832 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/xxhash.h @@ -0,0 +1,285 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Header File + * Copyright (c) 2012-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - xxHash source repository : https://github.com/Cyan4973/xxHash + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. 
+ +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bits version, named XXH64, is available since r35. +It offers much better speed, but for 64-bits applications only. +Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + + +/* **************************** +* Definitions +******************************/ +#include <stddef.h> /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** +* API modifier +******************************/ +/** XXH_PRIVATE_API +* This is useful if you want to include xxhash functions in `static` mode +* in order to inline them, and remove their symbol from the public list. +* Methodology : +* #define XXH_PRIVATE_API +* #include "xxhash.h" +* `xxhash.c` is automatically included. +* It's not useful to compile and link it as a separate module anymore.
+*/ +#ifdef XXH_PRIVATE_API +# ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY +# endif +# if defined(__GNUC__) +# define XXH_PUBLIC_API static __inline __attribute__((unused)) +# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define XXH_PUBLIC_API static inline +# elif defined(_MSC_VER) +# define XXH_PUBLIC_API static __inline +# else +# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ +# endif +#else +# define XXH_PUBLIC_API /* do nothing */ +#endif /* XXH_PRIVATE_API */ + +/*!XXH_NAMESPACE, aka Namespace Emulation : + +If you want to include _and expose_ xxHash functions from within your own library, +but also want to avoid symbol collisions with another library which also includes xxHash, + +you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library +with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). + +Note that no change is required within the calling program as long as it includes `xxhash.h` : +regular symbol name will be automatically translated by this header. 
+*/ +#ifdef XXH_NAMESPACE +# define XXH_CAT(A,B) A##B +# define XXH_NAME2(A,B) XXH_CAT(A,B) +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) +# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +#endif + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/* **************************** +* Simple Hash Functions +******************************/ +typedef unsigned int XXH32_hash_t; +typedef unsigned long long XXH64_hash_t; + +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); 
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*! +XXH32() : + Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s +XXH64() : + Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark). +*/ + + +/* **************************** +* Streaming Hash Functions +******************************/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + +/*! State allocation, compatible with dynamic libraries */ + +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); + + +/* hash streaming */ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/* +These functions generate the xxHash of an input provided in multiple segments. +Note that, for small input, they are slower than single-call functions, due to state management. 
+For small input, prefer `XXH32()` and `XXH64()` . + +XXH state must first be allocated, using XXH*_createState() . + +Start a new hash by initializing state with a seed, using XXH*_reset(). + +Then, feed the hash state by calling XXH*_update() as many times as necessary. +Obviously, input must be allocated and read accessible. +The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + +Finally, a hash value can be produced anytime, by using XXH*_digest(). +This function returns the nn-bits hash as an int or long long. + +It's still possible to continue inserting input into the hash state after a digest, +and generate some new hashes later on, by calling again XXH*_digest(). + +When done, free XXH state space if it was allocated dynamically. +*/ + + +/* ************************** +* Utils +****************************/ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ +# define restrict /* disable restrict */ +#endif + +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); + + +/* ************************** +* Canonical representation +****************************/ +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. +* The canonical representation uses human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. 
+*/ +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); + +#endif /* XXHASH_H_5627135585666179 */ + + + +/* ================================================================================================ + This section contains definitions which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + They shall only be used with static linking. + Never use these definitions in association with dynamic linking ! +=================================================================================================== */ +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) +#define XXH_STATIC_H_3543687687345 + +/* These definitions are only meant to allow allocation of XXH state + statically, on stack, or in a struct for example. + Do not use members directly. 
*/ + + struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; /* buffer defined as U32 for alignment */ + unsigned memsize; + unsigned reserved; /* never read nor write, will be removed in a future version */ + }; /* typedef'd to XXH32_state_t */ + + struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ + unsigned memsize; + unsigned reserved[2]; /* never read nor write, will be removed in a future version */ + }; /* typedef'd to XXH64_state_t */ + + +# ifdef XXH_PRIVATE_API +# include "xxhash.c" /* include xxhash functions as `static`, for inlining */ +# endif + +#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/borg/algorithms/zstd/lib/common/zstd_common.c b/src/borg/algorithms/zstd/lib/common/zstd_common.c new file mode 100644 index 0000000000..91fe3323a5 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/zstd_common.c @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + + +/*-************************************* +* Dependencies +***************************************/ +#include <stdlib.h> /* malloc, calloc, free */ +#include <string.h> /* memset */ +#include "error_private.h" +#include "zstd_internal.h" + + +/*-**************************************** +* Version +******************************************/ +unsigned ZSTD_versionNumber(void) { return ZSTD_VERSION_NUMBER; } + +const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; } + + +/*-**************************************** +* ZSTD Error Management +******************************************/ +#undef ZSTD_isError /* defined within zstd_internal.h */ +/*! ZSTD_isError() : + * tells if a return value is an error code + * symbol is required for external callers */ +unsigned ZSTD_isError(size_t code) { return ERR_isError(code); } + +/*! ZSTD_getErrorName() : + * provides error code string from function result (useful for debugging) */ +const char* ZSTD_getErrorName(size_t code) { return ERR_getErrorName(code); } + +/*! ZSTD_getError() : + * convert a `size_t` function result into a proper ZSTD_errorCode enum */ +ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); } + +/*!
ZSTD_getErrorString() : + * provides error code string from enum */ +const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); } + + + +/*=************************************************************** +* Custom allocator +****************************************************************/ +void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) + return customMem.customAlloc(customMem.opaque, size); + return malloc(size); +} + +void* ZSTD_calloc(size_t size, ZSTD_customMem customMem) +{ + if (customMem.customAlloc) { + /* calloc implemented as malloc+memset; + * not as efficient as calloc, but next best guess for custom malloc */ + void* const ptr = customMem.customAlloc(customMem.opaque, size); + memset(ptr, 0, size); + return ptr; + } + return calloc(1, size); +} + +void ZSTD_free(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) { + if (customMem.customFree) + customMem.customFree(customMem.opaque, ptr); + else + free(ptr); + } +} diff --git a/src/borg/algorithms/zstd/lib/common/zstd_errors.h b/src/borg/algorithms/zstd/lib/common/zstd_errors.h new file mode 100644 index 0000000000..998398e7e5 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/zstd_errors.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_ERRORS_H_398273423 +#define ZSTD_ERRORS_H_398273423 + +#if defined (__cplusplus) +extern "C" { +#endif + +/*===== dependency =====*/ +#include <stddef.h> /* size_t */ + + +/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */ +#ifndef ZSTDERRORLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define ZSTDERRORLIB_VISIBILITY +# endif +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY +#endif + +/*-********************************************* + * Error codes list + *-********************************************* + * Error codes _values_ are pinned down since v1.3.1 only. + * Therefore, don't rely on values if you may link to any version < v1.3.1. + * + * Only values < 100 are considered stable. + * + * note 1 : this API shall be used with static linking only. + * dynamic linking is not yet officially supported. + * note 2 : Prefer relying on the enum than on its value whenever possible + * This is the only supported way to use the error list < v1.3.1 + * note 3 : ZSTD_isError() is always correct, whatever the library version.
+ **********************************************/ +typedef enum { + ZSTD_error_no_error = 0, + ZSTD_error_GENERIC = 1, + ZSTD_error_prefix_unknown = 10, + ZSTD_error_version_unsupported = 12, + ZSTD_error_frameParameter_unsupported = 14, + ZSTD_error_frameParameter_windowTooLarge = 16, + ZSTD_error_corruption_detected = 20, + ZSTD_error_checksum_wrong = 22, + ZSTD_error_dictionary_corrupted = 30, + ZSTD_error_dictionary_wrong = 32, + ZSTD_error_dictionaryCreation_failed = 34, + ZSTD_error_parameter_unsupported = 40, + ZSTD_error_parameter_outOfBound = 42, + ZSTD_error_tableLog_tooLarge = 44, + ZSTD_error_maxSymbolValue_tooLarge = 46, + ZSTD_error_maxSymbolValue_tooSmall = 48, + ZSTD_error_stage_wrong = 60, + ZSTD_error_init_missing = 62, + ZSTD_error_memory_allocation = 64, + ZSTD_error_workSpace_tooSmall= 66, + ZSTD_error_dstSize_tooSmall = 70, + ZSTD_error_srcSize_wrong = 72, + ZSTD_error_dstBuffer_null = 74, + /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */ + ZSTD_error_frameIndex_tooLarge = 100, + ZSTD_error_seekableIO = 102, + ZSTD_error_dstBuffer_wrong = 104, + ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */ +} ZSTD_ErrorCode; + +/*! 
ZSTD_getErrorCode() : + convert a `size_t` function result into a `ZSTD_ErrorCode` enum type, + which can be used to compare with enum list published above */ +ZSTDERRORLIB_API ZSTD_ErrorCode ZSTD_getErrorCode(size_t functionResult); +ZSTDERRORLIB_API const char* ZSTD_getErrorString(ZSTD_ErrorCode code); /**< Same as ZSTD_getErrorName, but using a `ZSTD_ErrorCode` enum argument */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_ERRORS_H_398273423 */ diff --git a/src/borg/algorithms/zstd/lib/common/zstd_internal.h b/src/borg/algorithms/zstd/lib/common/zstd_internal.h new file mode 100644 index 0000000000..3bc7e55a0a --- /dev/null +++ b/src/borg/algorithms/zstd/lib/common/zstd_internal.h @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_CCOMMON_H_MODULE +#define ZSTD_CCOMMON_H_MODULE + +/* this module contains definitions which must be identical + * across compression, decompression and dictBuilder. 
+ * It also contains a few functions useful to at least 2 of them + * and which benefit from being inlined */ + +/*-************************************* +* Dependencies +***************************************/ +#ifdef __aarch64__ +#include <arm_neon.h> +#endif +#include "compiler.h" +#include "mem.h" +#include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ +#include "error_private.h" +#define ZSTD_STATIC_LINKING_ONLY +#include "../zstd.h" +#define FSE_STATIC_LINKING_ONLY +#include "fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "huf.h" +#ifndef XXH_STATIC_LINKING_ONLY +# define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */ +#endif +#include "xxhash.h" /* XXH_reset, update, digest */ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ---- static assert (debug) --- */ +#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) +#define ZSTD_isError ERR_isError /* for inlining */ +#define FSE_isError ERR_isError +#define HUF_isError ERR_isError + + +/*-************************************* +* shared macros +***************************************/ +#undef MIN +#undef MAX +#define MIN(a,b) ((a)<(b) ? (a) : (b)) +#define MAX(a,b) ((a)>(b) ? (a) : (b)) + +/** + * Ignore: this is an internal helper. + * + * This is a helper function to help force C99-correctness during compilation. + * Under strict compilation modes, variadic macro arguments can't be empty. + * However, variadic function arguments can be. Using a function therefore lets + * us statically check that at least one (string) argument was passed, + * independent of the compilation flags. + */ +static INLINE_KEYWORD UNUSED_ATTR +void _force_has_format_string(const char *format, ...) { + (void)format; +} + +/** + * Ignore: this is an internal helper. + * + * We want to force this function invocation to be syntactically correct, but + * we don't want to force runtime evaluation of its arguments. + */ +#define _FORCE_HAS_FORMAT_STRING(...)
\ + if (0) { \ + _force_has_format_string(__VA_ARGS__); \ + } + +/** + * Return the specified error if the condition evaluates to true. + * + * In debug modes, prints additional information. + * In order to do that (particularly, printing the conditional that failed), + * this can't just wrap RETURN_ERROR(). + */ +#define RETURN_ERROR_IF(cond, err, ...) \ + if (cond) { \ + RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ + __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } + +/** + * Unconditionally return the specified error. + * + * In debug modes, prints additional information. + */ +#define RETURN_ERROR(err, ...) \ + do { \ + RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ + __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } while(0); + +/** + * If the provided expression evaluates to an error code, returns that error code. + * + * In debug modes, prints additional information. + */ +#define FORWARD_IF_ERROR(err, ...) 
\ + do { \ + size_t const err_code = (err); \ + if (ERR_isError(err_code)) { \ + RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ + __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return err_code; \ + } \ + } while(0); + + +/*-************************************* +* Common constants +***************************************/ +#define ZSTD_OPT_NUM (1<<12) + +#define ZSTD_REP_NUM 3 /* number of repcodes */ +#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) +static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define BIT7 128 +#define BIT6 64 +#define BIT5 32 +#define BIT4 16 +#define BIT1 2 +#define BIT0 1 + +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 +static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; +static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; + +#define ZSTD_FRAMEIDSIZE 4 /* magic number size */ + +#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ +static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; +typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; + +#define ZSTD_FRAMECHECKSUMSIZE 4 + +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ + +#define HufLog 12 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; + +#define LONGNBSEQ 0x7F00 + +#define MINMATCH 3 + +#define Litbits 8 +#define MaxLit ((1<= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN)); + + if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { + /* Handle short offset copies. 
*/ + do { + COPY8(op, ip) + } while (op < oend); + } else { + assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN); + /* Separate out the first COPY16() call because the copy length is + * almost certain to be short, so the branches have different + * probabilities. Since it is almost certain to be short, only do + * one COPY16() in the first call. Then, do two calls per loop since + * at that point it is more likely to have a high trip count. + */ +#ifndef __aarch64__ + do { + COPY16(op, ip); + } + while (op < oend); +#else + COPY16(op, ip); + if (op >= oend) return; + do { + COPY16(op, ip); + COPY16(op, ip); + } + while (op < oend); +#endif + } +} + +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const length = MIN(dstCapacity, srcSize); + if (length > 0) { + memcpy(dst, src, length); + } + return length; +} + +/* define "workspace is too large" as this number of times larger than needed */ +#define ZSTD_WORKSPACETOOLARGE_FACTOR 3 + +/* when workspace is continuously too large + * during at least this number of times, + * context's memory usage is considered wasteful, + * because it's sized to handle a worst case scenario which rarely happens. 
+ * In which case, resize it down to free some memory */ +#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128 + + +/*-******************************************* +* Private declarations +*********************************************/ +typedef struct seqDef_s { + U32 offset; + U16 litLength; + U16 matchLength; +} seqDef; + +typedef struct { + seqDef* sequencesStart; + seqDef* sequences; + BYTE* litStart; + BYTE* lit; + BYTE* llCode; + BYTE* mlCode; + BYTE* ofCode; + size_t maxNbSeq; + size_t maxNbLit; + U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */ + U32 longLengthPos; +} seqStore_t; + +typedef struct { + U32 litLength; + U32 matchLength; +} ZSTD_sequenceLength; + +/** + * Returns the ZSTD_sequenceLength for the given sequences. It handles the decoding of long sequences + * indicated by longLengthPos and longLengthID, and adds MINMATCH back to matchLength. + */ +MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq) +{ + ZSTD_sequenceLength seqLen; + seqLen.litLength = seq->litLength; + seqLen.matchLength = seq->matchLength + MINMATCH; + if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { + if (seqStore->longLengthID == 1) { + seqLen.litLength += 0xFFFF; + } + if (seqStore->longLengthID == 2) { + seqLen.matchLength += 0xFFFF; + } + } + return seqLen; +} + +/** + * Contains the compressed frame size and an upper-bound for the decompressed frame size. + * Note: before using `compressedSize`, check for errors using ZSTD_isError(). 
+ * similarly, before using `decompressedBound`, check for errors using: + * `decompressedBound != ZSTD_CONTENTSIZE_ERROR` + */ +typedef struct { + size_t compressedSize; + unsigned long long decompressedBound; +} ZSTD_frameSizeInfo; /* decompress & legacy */ + +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx); /* compress & dictBuilder */ +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr); /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */ + +/* custom memory allocation functions */ +void* ZSTD_malloc(size_t size, ZSTD_customMem customMem); +void* ZSTD_calloc(size_t size, ZSTD_customMem customMem); +void ZSTD_free(void* ptr, ZSTD_customMem customMem); + + +MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus */ +{ + assert(val != 0); + { +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + return _BitScanReverse(&r, val) ? (unsigned)r : 0; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ + return __builtin_clz (val) ^ 31; +# elif defined(__ICCARM__) /* IAR Intrinsic */ + return 31 - __CLZ(val); +# else /* Software version */ + static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[(v * 0x07C4ACDDU) >> 27]; +# endif + } +} + + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); /* zstdmt, adaptive_compression (shouldn't get this definition from here) */ + + +typedef struct { + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; /* declared here for decompress and fullbench */ + +/*! 
ZSTD_getcBlockSize() : + * Provides the size of compressed block from block header `src` */ +/* Used by: decompress, fullbench (does not get its definition from here) */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, + blockProperties_t* bpPtr); + +/*! ZSTD_decodeSeqHeaders() : + * decode sequence header from src */ +/* Used by: decompress, fullbench (does not get its definition from here) */ +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, + const void* src, size_t srcSize); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/src/borg/algorithms/zstd/lib/compress/fse_compress.c b/src/borg/algorithms/zstd/lib/compress/fse_compress.c new file mode 100644 index 0000000000..a42759814f --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/fse_compress.c @@ -0,0 +1,698 @@ +/* ****************************************************************** + * FSE : Finite State Entropy encoder + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +****************************************************************** */ + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include "../common/compiler.h" +#include "../common/mem.h" /* U32, U16, etc. 
*/ +#include "../common/debug.h" /* assert, DEBUGLOG */ +#include "hist.h" /* HIST_count_wksp */ +#include "../common/bitstream.h" +#define FSE_STATIC_LINKING_ONLY +#include "../common/fse.h" +#include "../common/error_private.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_isError ERR_isError + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). 
+ * wkspSize should be sized to handle worst case situation, which is `1<>1 : 1) ; + FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + U32 const step = FSE_TABLESTEP(tableSize); + U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; + + FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; + U32 highThreshold = tableSize-1; + + /* CTable header */ + if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); + tableU16[-2] = (U16) tableLog; + tableU16[-1] = (U16) maxSymbolValue; + assert(tableLog < 16); /* required for threshold strategy to work */ + + /* For explanations on how to distribute symbol values over the table : + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + + #ifdef __clang_analyzer__ + memset(tableSymbol, 0, sizeof(*tableSymbol) * tableSize); /* useless initialization, just to keep scan-build happy */ + #endif + + /* symbol start positions */ + { U32 u; + cumul[0] = 0; + for (u=1; u <= maxSymbolValue+1; u++) { + if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ + cumul[u] = cumul[u-1] + 1; + tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); + } else { + cumul[u] = cumul[u-1] + normalizedCounter[u-1]; + } } + cumul[maxSymbolValue+1] = tableSize+1; + } + + /* Spread symbols */ + { U32 position = 0; + U32 symbol; + for (symbol=0; symbol<=maxSymbolValue; symbol++) { + int nbOccurrences; + int const freq = normalizedCounter[symbol]; + for (nbOccurrences=0; nbOccurrences highThreshold) + position = (position + step) & tableMask; /* Low proba area */ + } } + + assert(position==0); /* Must have initialized all positions */ + } + + /* Build table */ + { U32 u; for (u=0; u> 3) + 3; + return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? 
use default */ +} + +static size_t +FSE_writeNCount_generic (void* header, size_t headerBufferSize, + const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, + unsigned writeIsSafe) +{ + BYTE* const ostart = (BYTE*) header; + BYTE* out = ostart; + BYTE* const oend = ostart + headerBufferSize; + int nbBits; + const int tableSize = 1 << tableLog; + int remaining; + int threshold; + U32 bitStream = 0; + int bitCount = 0; + unsigned symbol = 0; + unsigned const alphabetSize = maxSymbolValue + 1; + int previousIs0 = 0; + + /* Table Size */ + bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; + bitCount += 4; + + /* Init */ + remaining = tableSize+1; /* +1 for extra accuracy */ + threshold = tableSize; + nbBits = tableLog+1; + + while ((symbol < alphabetSize) && (remaining>1)) { /* stops at 1 */ + if (previousIs0) { + unsigned start = symbol; + while ((symbol < alphabetSize) && !normalizedCounter[symbol]) symbol++; + if (symbol == alphabetSize) break; /* incorrect distribution */ + while (symbol >= start+24) { + start+=24; + bitStream += 0xFFFFU << bitCount; + if ((!writeIsSafe) && (out > oend-2)) + return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE) bitStream; + out[1] = (BYTE)(bitStream>>8); + out+=2; + bitStream>>=16; + } + while (symbol >= start+3) { + start+=3; + bitStream += 3 << bitCount; + bitCount += 2; + } + bitStream += (symbol-start) << bitCount; + bitCount += 2; + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) + return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + { int count = normalizedCounter[symbol++]; + int const max = (2*threshold-1) - remaining; + remaining -= count < 0 ? -count : count; + count++; /* +1 for extra accuracy */ + if (count>=threshold) + count += max; /* [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ */ + bitStream += count << bitCount; + bitCount += nbBits; + bitCount -= (count>=1; } + } + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) + return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + + if (remaining != 1) + return ERROR(GENERIC); /* incorrect normalized distribution */ + assert(symbol <= alphabetSize); + + /* flush remaining bitStream */ + if ((!writeIsSafe) && (out > oend - 2)) + return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out+= (bitCount+7) /8; + + return (out-ostart); +} + + +size_t FSE_writeNCount (void* buffer, size_t bufferSize, + const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ + + if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); + + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1 /* write in buffer is safe */); +} + + +/*-************************************************************** +* FSE Compression Code +****************************************************************/ + +FSE_CTable* FSE_createCTable (unsigned maxSymbolValue, unsigned tableLog) +{ + size_t size; + if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX; + size = FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); + return (FSE_CTable*)malloc(size); +} + +void FSE_freeCTable (FSE_CTable* ct) { free(ct); } + +/* provides the minimum logSize to safely represent a distribution */ +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) +{ + U32 
minBitsSrc = BIT_highbit32((U32)(srcSize)) + 1; + U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + return minBits; +} + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) +{ + U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 tableLog = maxTableLog; + U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); + assert(srcSize > 1); /* Not supported, RLE should be used instead */ + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ + if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ + if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; + if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; + return tableLog; +} + +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); +} + + +/* Secondary normalization method. + To be used when primary method fails. 
*/ + +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) +{ + short const NOT_YET_ASSIGNED = -2; + U32 s; + U32 distributed = 0; + U32 ToDistribute; + + /* Init */ + U32 const lowThreshold = (U32)(total >> tableLog); + U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == 0) { + norm[s]=0; + continue; + } + if (count[s] <= lowThreshold) { + norm[s] = -1; + distributed++; + total -= count[s]; + continue; + } + if (count[s] <= lowOne) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } + + norm[s]=NOT_YET_ASSIGNED; + } + ToDistribute = (1 << tableLog) - distributed; + + if (ToDistribute == 0) + return 0; + + if ((total / ToDistribute) > lowOne) { + /* risk of rounding to zero */ + lowOne = (U32)((total * 3) / (ToDistribute * 2)); + for (s=0; s<=maxSymbolValue; s++) { + if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } } + ToDistribute = (1 << tableLog) - distributed; + } + + if (distributed == maxSymbolValue+1) { + /* all values are pretty poor; + probably incompressible data (should have already been detected); + find max, then give all remaining points to max */ + U32 maxV = 0, maxC = 0; + for (s=0; s<=maxSymbolValue; s++) + if (count[s] > maxC) { maxV=s; maxC=count[s]; } + norm[maxV] += (short)ToDistribute; + return 0; + } + + if (total == 0) { + /* all of the symbols were low enough for the lowOne or lowThreshold */ + for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) + if (norm[s] > 0) { ToDistribute--; norm[s]++; } + return 0; + } + + { U64 const vStepLog = 62 - tableLog; + U64 const mid = (1ULL << (vStepLog-1)) - 1; + U64 const rStep = ((((U64)1<> vStepLog); + U32 const sEnd = (U32)(end >> vStepLog); + U32 const weight = sEnd - sStart; + if (weight < 1) + return ERROR(GENERIC); + norm[s] = (short)weight; + tmpTotal = end; + } } } + + return 
0; +} + + +size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, + const unsigned* count, size_t total, + unsigned maxSymbolValue) +{ + /* Sanity checks */ + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ + + { static U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + U64 const scale = 62 - tableLog; + U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */ + U64 const vStep = 1ULL<<(scale-20); + int stillToDistribute = 1<> tableLog); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == total) return 0; /* rle special case */ + if (count[s] == 0) { normalizedCounter[s]=0; continue; } + if (count[s] <= lowThreshold) { + normalizedCounter[s] = -1; + stillToDistribute--; + } else { + short proba = (short)((count[s]*step) >> scale); + if (proba<8) { + U64 restToBeat = vStep * rtbTable[proba]; + proba += (count[s]*step) - ((U64)proba< restToBeat; + } + if (proba > largestP) { largestP=proba; largest=s; } + normalizedCounter[s] = proba; + stillToDistribute -= proba; + } } + if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { + /* corner case, need another normalization method */ + size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); + if (FSE_isError(errorCode)) return errorCode; + } + else normalizedCounter[largest] += (short)stillToDistribute; + } + +#if 0 + { /* Print Table (debug) */ + U32 s; + U32 nTotal = 0; + for (s=0; s<=maxSymbolValue; s++) + RAWLOG(2, "%3i: %4i \n", s, normalizedCounter[s]); + for (s=0; s<=maxSymbolValue; s++) + nTotal += abs(normalizedCounter[s]); + if (nTotal != (1U<>1); /* assumption : tableLog >= 1 
*/ + FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* header */ + tableU16[-2] = (U16) nbBits; + tableU16[-1] = (U16) maxSymbolValue; + + /* Build table */ + for (s=0; s FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + FSE_FLUSHBITS(&bitC); + } + + /* 2 or 4 encoding per loop */ + while ( ip>istart ) { + + FSE_encodeSymbol(&bitC, &CState2, *--ip); + + if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ + FSE_FLUSHBITS(&bitC); + + FSE_encodeSymbol(&bitC, &CState1, *--ip); + + if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + } + + FSE_FLUSHBITS(&bitC); + } + + FSE_flushCState(&bitC, &CState2); + FSE_flushCState(&bitC, &CState1); + return BIT_closeCStream(&bitC); +} + +size_t FSE_compress_usingCTable (void* dst, size_t dstSize, + const void* src, size_t srcSize, + const FSE_CTable* ct) +{ + unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); + + if (fast) + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); + else + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); +} + + +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). 
+ * `wkspSize` size must be `(1< not compressible */ + if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ + } + + tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += nc_err; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + /* check compressibility */ + if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; + + return op-ostart; +} + +typedef struct { + FSE_CTable CTable_max[FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)]; + BYTE scratchBuffer[1 << FSE_MAX_TABLELOG]; +} fseWkspMax_t; + +size_t FSE_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog) +{ + fseWkspMax_t scratchBuffer; + DEBUG_STATIC_ASSERT(sizeof(scratchBuffer) >= FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)); /* compilation failures here means scratchBuffer is not large enough */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_compress_wksp(dst, dstCapacity, src, srcSize, maxSymbolValue, tableLog, &scratchBuffer, sizeof(scratchBuffer)); +} + +size_t FSE_compress (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return FSE_compress2(dst, dstCapacity, src, srcSize, FSE_MAX_SYMBOL_VALUE, FSE_DEFAULT_TABLELOG); +} + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/src/borg/algorithms/zstd/lib/compress/hist.c b/src/borg/algorithms/zstd/lib/compress/hist.c new file mode 100644 index 0000000000..61e08c7968 --- /dev/null +++ 
b/src/borg/algorithms/zstd/lib/compress/hist.c @@ -0,0 +1,183 @@ +/* ****************************************************************** + * hist : Histogram functions + * part of Finite State Entropy project + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. +****************************************************************** */ + +/* --- dependencies --- */ +#include "../common/mem.h" /* U32, BYTE, etc. */ +#include "../common/debug.h" /* assert, DEBUGLOG */ +#include "../common/error_private.h" /* ERROR */ +#include "hist.h" + + +/* --- Error management --- */ +unsigned HIST_isError(size_t code) { return ERR_isError(code); } + +/*-************************************************************** + * Histogram functions + ****************************************************************/ +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned largestCount=0; + + memset(count, 0, (maxSymbolValue+1) * sizeof(*count)); + if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } + + while (ip largestCount) largestCount = count[s]; + } + + return largestCount; +} + +typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e; + +/* HIST_count_parallel_wksp() : + * store histogram into 4 intermediate tables, recombined at the end. 
+ * this design makes better use of OoO cpus, + * and is noticeably faster when some values are heavily repeated. + * But it needs some additional workspace for intermediate tables. + * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32. + * @return : largest histogram frequency, + * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */ +static size_t HIST_count_parallel_wksp( + unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + HIST_checkInput_e check, + U32* const workSpace) +{ + const BYTE* ip = (const BYTE*)source; + const BYTE* const iend = ip+sourceSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + U32* const Counting1 = workSpace; + U32* const Counting2 = Counting1 + 256; + U32* const Counting3 = Counting2 + 256; + U32* const Counting4 = Counting3 + 256; + + memset(workSpace, 0, 4*256*sizeof(unsigned)); + + /* safety checks */ + if (!sourceSize) { + memset(count, 0, maxSymbolValue + 1); + *maxSymbolValuePtr = 0; + return 0; + } + if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + + /* by stripes of 16 bytes */ + { U32 cached = MEM_read32(ip); ip += 4; + while (ip < iend-15) { + U32 c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + } + ip-=4; + } + + /* finish last symbols */ + while (ipmaxSymbolValue; s--) { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + 
if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); + } } + + { U32 s; + if (maxSymbolValue > 255) maxSymbolValue = 255; + for (s=0; s<=maxSymbolValue; s++) { + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; + if (count[s] > max) max = count[s]; + } } + + while (!count[maxSymbolValue]) maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + return (size_t)max; +} + +/* HIST_countFast_wksp() : + * Same as HIST_countFast(), but using an externally provided scratch buffer. + * `workSpace` is a writable buffer which must be 4-bytes aligned, + * `workSpaceSize` must be >= HIST_WKSP_SIZE + */ +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + void* workSpace, size_t workSpaceSize) +{ + if (sourceSize < 1500) /* heuristic threshold */ + return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize); + if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ + if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); + return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace); +} + +/* fast variant (unsafe : won't check if src contains values beyond count[] limit) */ +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize) +{ + unsigned tmpCounters[HIST_WKSP_SIZE_U32]; + return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters)); +} + +/* HIST_count_wksp() : + * Same as HIST_count(), but using an externally provided scratch buffer. 
+ * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */ +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + void* workSpace, size_t workSpaceSize) +{ + if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ + if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall); + if (*maxSymbolValuePtr < 255) + return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace); + *maxSymbolValuePtr = 255; + return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize); +} + +size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + unsigned tmpCounters[HIST_WKSP_SIZE_U32]; + return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters)); +} diff --git a/src/borg/algorithms/zstd/lib/compress/hist.h b/src/borg/algorithms/zstd/lib/compress/hist.h new file mode 100644 index 0000000000..77e3ec4fb1 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/hist.h @@ -0,0 +1,75 @@ +/* ****************************************************************** + * hist : Histogram functions + * part of Finite State Entropy project + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+****************************************************************** */ + +/* --- dependencies --- */ +#include /* size_t */ + + +/* --- simple histogram functions --- */ + +/*! HIST_count(): + * Provides the precise count of each byte within a table 'count'. + * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1). + * Updates *maxSymbolValuePtr with actual largest symbol value detected. + * @return : count of the most frequent symbol (which isn't identified). + * or an error code, which can be tested using HIST_isError(). + * note : if return == srcSize, there is only one symbol. + */ +size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize); + +unsigned HIST_isError(size_t code); /**< tells if a return value is an error code */ + + +/* --- advanced histogram functions --- */ + +#define HIST_WKSP_SIZE_U32 1024 +#define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned)) +/** HIST_count_wksp() : + * Same as HIST_count(), but using an externally provided scratch buffer. + * Benefit is this function will use very little stack space. + * `workSpace` is a writable buffer which must be 4-bytes aligned, + * `workSpaceSize` must be >= HIST_WKSP_SIZE + */ +size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize, + void* workSpace, size_t workSpaceSize); + +/** HIST_countFast() : + * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr. + * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` + */ +size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize); + +/** HIST_countFast_wksp() : + * Same as HIST_countFast(), but using an externally provided scratch buffer. 
+ * `workSpace` is a writable buffer which must be 4-bytes aligned, + * `workSpaceSize` must be >= HIST_WKSP_SIZE + */ +size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize, + void* workSpace, size_t workSpaceSize); + +/*! HIST_count_simple() : + * Same as HIST_countFast(), this function is unsafe, + * and will segfault if any value within `src` is `> *maxSymbolValuePtr`. + * It is also a bit slower for large inputs. + * However, it does not need any additional memory (not even on stack). + * @return : count of the most frequent symbol. + * Note this function doesn't produce any error (i.e. it must succeed). + */ +unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize); diff --git a/src/borg/algorithms/zstd/lib/compress/huf_compress.c b/src/borg/algorithms/zstd/lib/compress/huf_compress.c new file mode 100644 index 0000000000..546879868a --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/huf_compress.c @@ -0,0 +1,798 @@ +/* ****************************************************************** + * Huffman encoder, part of New Generation Entropy library + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * - Public forum : https://groups.google.com/forum/#!forum/lz4c + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* memcpy, memset */ +#include /* printf (debug) */ +#include "../common/compiler.h" +#include "../common/bitstream.h" +#include "hist.h" +#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ +#include "../common/fse.h" /* header compression */ +#define HUF_STATIC_LINKING_ONLY +#include "../common/huf.h" +#include "../common/error_private.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_isError ERR_isError +#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ + + +/* ************************************************************** +* Utils +****************************************************************/ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); +} + + +/* ******************************************************* +* HUF : Huffman block compression +*********************************************************/ +/* HUF_compressWeights() : + * Same as FSE_compress(), but dedicated to huff0's weights compression. + * The use case needs much less stack memory. + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. 
+ */ +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 +static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const oend = ostart + dstSize; + + unsigned maxSymbolValue = HUF_TABLELOG_MAX; + U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; + + FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; + BYTE scratchBuffer[1< not compressible */ + } + + tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) ); + op += hSize; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + return (size_t)(op-ostart); +} + + +struct HUF_CElt_s { + U16 val; + BYTE nbBits; +}; /* typedef'd to HUF_CElt within "huf.h" */ + +/*! HUF_writeCTable() : + `CTable` : Huffman tree to save, using huf representation. 
+ @return : size of saved CTable */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, + const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog) +{ + BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; + BYTE* op = (BYTE*)dst; + U32 n; + + /* check conditions */ + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + + /* convert to weight */ + bitsToWeight[0] = 0; + for (n=1; n1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ + op[0] = (BYTE)hSize; + return hSize+1; + } } + + /* write raw values as 4-bits (max : 15) */ + if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ + if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ + op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); + huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ + for (n=0; n HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); + + /* Prepare base value per rank */ + { U32 n, nextRankStart = 0; + for (n=1; n<=tableLog; n++) { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } } + + /* fill nbBits */ + *hasZeroWeights = 0; + { U32 n; for (n=0; nn=tableLog+1 */ + U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; + { U32 n; for (n=0; n0; n--) { /* start at n=tablelog <-> w=1 */ + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + /* assign value within rank, symbol order */ + { U32 n; for (n=0; n maxNbBits */ + + /* there are several too large elements (at least >= 2) */ + { int totalCost = 0; + const U32 baseCost = 1 << (largestBits - maxNbBits); + int n = (int)lastNonNull; + + while (huffNode[n].nbBits > maxNbBits) { + 
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); + huffNode[n].nbBits = (BYTE)maxNbBits; + n --; + } /* n stops at huffNode[n].nbBits <= maxNbBits */ + while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ + + /* renorm totalCost */ + totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ + + /* repay normalized cost */ + { U32 const noSymbol = 0xF0F0F0F0; + U32 rankLast[HUF_TABLELOG_MAX+2]; + + /* Get pos of last (smallest) symbol per rank */ + memset(rankLast, 0xF0, sizeof(rankLast)); + { U32 currentNbBits = maxNbBits; + int pos; + for (pos=n ; pos >= 0; pos--) { + if (huffNode[pos].nbBits >= currentNbBits) continue; + currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ + rankLast[maxNbBits-currentNbBits] = (U32)pos; + } } + + while (totalCost > 0) { + U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1; + for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { + U32 const highPos = rankLast[nBitsToDecrease]; + U32 const lowPos = rankLast[nBitsToDecrease-1]; + if (highPos == noSymbol) continue; + if (lowPos == noSymbol) break; + { U32 const highTotal = huffNode[highPos].count; + U32 const lowTotal = 2 * huffNode[lowPos].count; + if (highTotal <= lowTotal) break; + } } + /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/ + /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ + while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) + nBitsToDecrease ++; + totalCost -= 1 << (nBitsToDecrease-1); + if (rankLast[nBitsToDecrease-1] == noSymbol) + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ + huffNode[rankLast[nBitsToDecrease]].nbBits ++; + if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol; + else { + rankLast[nBitsToDecrease]--; + if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ + } } /* while (totalCost > 0) */ + + while (totalCost < 0) { /* Sometimes, cost correction overshoot */ + if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + while (huffNode[n].nbBits == maxNbBits) n--; + huffNode[n+1].nbBits--; + assert(n >= 0); + rankLast[1] = (U32)(n+1); + totalCost++; + continue; + } + huffNode[ rankLast[1] + 1 ].nbBits--; + rankLast[1]++; + totalCost ++; + } } } /* there are several too large elements (at least >= 2) */ + + return maxNbBits; +} + +typedef struct { + U32 base; + U32 current; +} rankPos; + +typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; + +#define RANK_POSITION_TABLE_SIZE 32 + +typedef struct { + huffNodeTable huffNodeTbl; + rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; +} HUF_buildCTable_wksp_tables; + +static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition) +{ + U32 n; + + memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); + for (n=0; n<=maxSymbolValue; n++) { + U32 r = BIT_highbit32(count[n] + 1); + rankPosition[r].base ++; + } + for (n=30; n>0; n--) rankPosition[n-1].base += rankPosition[n].base; + for (n=0; n<32; 
n++) rankPosition[n].current = rankPosition[n].base; + for (n=0; n<=maxSymbolValue; n++) { + U32 const c = count[n]; + U32 const r = BIT_highbit32(c+1) + 1; + U32 pos = rankPosition[r].current++; + while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) { + huffNode[pos] = huffNode[pos-1]; + pos--; + } + huffNode[pos].count = c; + huffNode[pos].byte = (BYTE)n; + } +} + + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). + */ +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) + +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +{ + HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace; + nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; + nodeElt* const huffNode = huffNode0+1; + int nonNullRank; + int lowS, lowN; + int nodeNb = STARTNODE; + int n, nodeRoot; + + /* safety checks */ + if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ + if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) + return ERROR(workSpace_tooSmall); + if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) + return ERROR(maxSymbolValue_tooLarge); + memset(huffNode0, 0, sizeof(huffNodeTable)); + + /* sort, decreasing order */ + HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); + + /* init for parents */ + nonNullRank = (int)maxSymbolValue; + while(huffNode[nonNullRank].count == 0) nonNullRank--; + lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; + huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; + nodeNb++; lowS-=2; + for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = 
(U32)(1U<<30); + huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ + + /* create parents */ + while (nodeNb <= nodeRoot) { + int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; + huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; + nodeNb++; + } + + /* distribute weights (unlimited tree height) */ + huffNode[nodeRoot].nbBits = 0; + for (n=nodeRoot-1; n>=STARTNODE; n--) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + for (n=0; n<=nonNullRank; n++) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + + /* enforce maxTableLog */ + maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); + + /* fill result into tree (val, nbBits) */ + { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; + int const alphabetSize = (int)(maxSymbolValue + 1); + if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ + for (n=0; n<=nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; + /* determine stating value per rank */ + { U16 min = 0; + for (n=(int)maxNbBits; n>0; n--) { + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + for (n=0; n> 3; +} + +int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + int bad = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + bad |= (count[s] != 0) & (CTable[s].nbBits == 0); + } + return !bad; +} + +size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } + +FORCE_INLINE_TEMPLATE void +HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +{ + BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); +} + +#define HUF_FLUSHBITS(s) BIT_flushBits(s) + +#define HUF_FLUSHBITS_1(stream) \ + 
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) + +#define HUF_FLUSHBITS_2(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) + +FORCE_INLINE_TEMPLATE size_t +HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable) +{ + const BYTE* ip = (const BYTE*) src; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + size_t n; + BIT_CStream_t bitC; + + /* init */ + if (dstSize < 8) return 0; /* not enough space to compress */ + { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op)); + if (HUF_isError(initErr)) return 0; } + + n = srcSize & ~3; /* join to mod 4 */ + switch (srcSize & 3) + { + case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); + HUF_FLUSHBITS_2(&bitC); + /* fall-through */ + case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); + HUF_FLUSHBITS_1(&bitC); + /* fall-through */ + case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); + HUF_FLUSHBITS(&bitC); + /* fall-through */ + case 0 : /* fall-through */ + default: break; + } + + for (; n>0; n-=4) { /* note : n&3==0 at this stage */ + HUF_encodeSymbol(&bitC, ip[n- 1], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 2], CTable); + HUF_FLUSHBITS_2(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 3], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 4], CTable); + HUF_FLUSHBITS(&bitC); + } + + return BIT_closeCStream(&bitC); +} + +#if DYNAMIC_BMI2 + +static TARGET_ATTRIBUTE("bmi2") size_t +HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable) +{ + return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); +} + +static size_t +HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable) +{ + return 
HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); +} + +static size_t +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable, const int bmi2) +{ + if (bmi2) { + return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); + } + return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); +} + +#else + +static size_t +HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable, const int bmi2) +{ + (void)bmi2; + return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); +} + +#endif + +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); +} + + +static size_t +HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, + const void* src, size_t srcSize, + const HUF_CElt* CTable, int bmi2) +{ + size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ + if (srcSize < 12) return 0; /* no saving possible : too small input */ + op += 6; /* jumpTable */ + + assert(op <= oend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + if (cSize==0) return 0; + assert(cSize <= 65535); + MEM_writeLE16(ostart, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + assert(op <= oend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + if (cSize==0) return 0; + assert(cSize <= 65535); + 
MEM_writeLE16(ostart+2, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + assert(op <= oend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); + if (cSize==0) return 0; + assert(cSize <= 65535); + MEM_writeLE16(ostart+4, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + assert(op <= oend); + assert(ip <= iend); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); + if (cSize==0) return 0; + op += cSize; + } + + return (size_t)(op-ostart); +} + +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); +} + +typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; + +static size_t HUF_compressCTable_internal( + BYTE* const ostart, BYTE* op, BYTE* const oend, + const void* src, size_t srcSize, + HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2) +{ + size_t const cSize = (nbStreams==HUF_singleStream) ? 
+ HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2) : + HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, bmi2); + if (HUF_isError(cSize)) { return cSize; } + if (cSize==0) { return 0; } /* uncompressible */ + op += cSize; + /* check compressibility */ + assert(op >= ostart); + if ((size_t)(op-ostart) >= srcSize-1) { return 0; } + return (size_t)(op-ostart); +} + +typedef struct { + unsigned count[HUF_SYMBOLVALUE_MAX + 1]; + HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; + HUF_buildCTable_wksp_tables buildCTable_wksp; +} HUF_compress_tables_t; + +/* HUF_compress_internal() : + * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +static size_t +HUF_compress_internal (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + HUF_nbStreams_e nbStreams, + void* workSpace, size_t wkspSize, + HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, + const int bmi2) +{ + HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE); + + /* checks & inits */ + if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ + if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); + if (!srcSize) return 0; /* Uncompressed */ + if (!dstSize) return 0; /* cannot fit anything within dst budget */ + if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ + if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; + if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; + + /* Heuristic : If old table is valid, use it for small inputs */ + if 
(preferRepeat && repeat && *repeat == HUF_repeat_valid) { + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + nbStreams, oldHufTable, bmi2); + } + + /* Scan input and build symbol stats */ + { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) ); + if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ + if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ + } + + /* Check validity of previous table */ + if ( repeat + && *repeat == HUF_repeat_check + && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { + *repeat = HUF_repeat_none; + } + /* Heuristic : use existing table for small inputs */ + if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + nbStreams, oldHufTable, bmi2); + } + + /* Build Huffman Tree */ + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, + maxSymbolValue, huffLog, + &table->buildCTable_wksp, sizeof(table->buildCTable_wksp)); + CHECK_F(maxBits); + huffLog = (U32)maxBits; + /* Zero unused symbols in CTable, so we can check it for validity */ + memset(table->CTable + (maxSymbolValue + 1), 0, + sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); + } + + /* Write table description header */ + { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, table->CTable, maxSymbolValue, huffLog) ); + /* Check if using previous huffman table is beneficial */ + if (repeat && *repeat != HUF_repeat_none) { + size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); + size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); + if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { + return HUF_compressCTable_internal(ostart, op, 
oend, + src, srcSize, + nbStreams, oldHufTable, bmi2); + } } + + /* Use the new huffman table */ + if (hSize + 12ul >= srcSize) { return 0; } + op += hSize; + if (repeat) { *repeat = HUF_repeat_none; } + if (oldHufTable) + memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ + } + return HUF_compressCTable_internal(ostart, op, oend, + src, srcSize, + nbStreams, table->CTable, bmi2); +} + + +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, HUF_singleStream, + workSpace, wkspSize, + NULL, NULL, 0, 0 /*bmi2*/); +} + +size_t HUF_compress1X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, HUF_singleStream, + workSpace, wkspSize, hufTable, + repeat, preferRepeat, bmi2); +} + +size_t HUF_compress1X (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog) +{ + unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); +} + +/* HUF_compress4X_repeat(): + * compress input using 4 streams. + * provide workspace to generate compression tables */ +size_t HUF_compress4X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, HUF_fourStreams, + workSpace, wkspSize, + NULL, NULL, 0, 0 /*bmi2*/); +} + +/* HUF_compress4X_repeat(): + * compress input using 4 streams. 
+ * re-use an existing huffman compression table */ +size_t HUF_compress4X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, + maxSymbolValue, huffLog, HUF_fourStreams, + workSpace, wkspSize, + hufTable, repeat, preferRepeat, bmi2); +} + +size_t HUF_compress2 (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog) +{ + unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); +} + +size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress.c b/src/borg/algorithms/zstd/lib/compress/zstd_compress.c new file mode 100644 index 0000000000..3f963b1cff --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress.c @@ -0,0 +1,4278 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/*-************************************* +* Dependencies +***************************************/ +#include /* INT_MAX */ +#include /* memset */ +#include "../common/cpu.h" +#include "../common/mem.h" +#include "hist.h" /* HIST_countFast_wksp */ +#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ +#include "../common/fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "../common/huf.h" +#include "zstd_compress_internal.h" +#include "zstd_compress_sequences.h" +#include "zstd_compress_literals.h" +#include "zstd_fast.h" +#include "zstd_double_fast.h" +#include "zstd_lazy.h" +#include "zstd_opt.h" +#include "zstd_ldm.h" +#include "zstd_compress_superblock.h" + + +/*-************************************* +* Helper functions +***************************************/ +/* ZSTD_compressBound() + * Note that the result from this function is only compatible with the "normal" + * full-block strategy. + * When there are a lot of small blocks due to frequent flush in streaming mode + * the overhead of headers can make the compressed data to be larger than the + * return value of ZSTD_compressBound(). 
+ */ +size_t ZSTD_compressBound(size_t srcSize) { + return ZSTD_COMPRESSBOUND(srcSize); +} + + +/*-************************************* +* Context memory management +***************************************/ +struct ZSTD_CDict_s { + const void* dictContent; + size_t dictContentSize; + U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */ + ZSTD_cwksp workspace; + ZSTD_matchState_t matchState; + ZSTD_compressedBlockState_t cBlockState; + ZSTD_customMem customMem; + U32 dictID; + int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ +}; /* typedef'd to ZSTD_CDict within "zstd.h" */ + +ZSTD_CCtx* ZSTD_createCCtx(void) +{ + return ZSTD_createCCtx_advanced(ZSTD_defaultCMem); +} + +static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) +{ + assert(cctx != NULL); + memset(cctx, 0, sizeof(*cctx)); + cctx->customMem = memManager; + cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); + assert(!ZSTD_isError(err)); + (void)err; + } +} + +ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem) +{ + ZSTD_STATIC_ASSERT(zcss_init==0); + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1)); + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_malloc(sizeof(ZSTD_CCtx), customMem); + if (!cctx) return NULL; + ZSTD_initCCtx(cctx, customMem); + return cctx; + } +} + +ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize) +{ + ZSTD_cwksp ws; + ZSTD_CCtx* cctx; + if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */ + if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */ + ZSTD_cwksp_init(&ws, workspace, workspaceSize); + + cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx)); + if (cctx == NULL) return NULL; + + memset(cctx, 0, sizeof(ZSTD_CCtx)); + ZSTD_cwksp_move(&cctx->workspace, &ws); + cctx->staticSize = workspaceSize; + + /* 
statically sized space. entropyWorkspace never moves (but prev/next block swap places) */ + if (!ZSTD_cwksp_check_available(&cctx->workspace, HUF_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL; + cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); + cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t)); + cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, HUF_WORKSPACE_SIZE); + cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + return cctx; +} + +/** + * Clears and frees all of the dictionaries in the CCtx. + */ +static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx) +{ + ZSTD_free(cctx->localDict.dictBuffer, cctx->customMem); + ZSTD_freeCDict(cctx->localDict.cdict); + memset(&cctx->localDict, 0, sizeof(cctx->localDict)); + memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); + cctx->cdict = NULL; +} + +static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict) +{ + size_t const bufferSize = dict.dictBuffer != NULL ? 
dict.dictSize : 0; + size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict); + return bufferSize + cdictSize; +} + +static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx) +{ + assert(cctx != NULL); + assert(cctx->staticSize == 0); + ZSTD_clearAllDicts(cctx); +#ifdef ZSTD_MULTITHREAD + ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL; +#endif + ZSTD_cwksp_free(&cctx->workspace, cctx->customMem); +} + +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support free on NULL */ + RETURN_ERROR_IF(cctx->staticSize, memory_allocation, + "not compatible with static CCtx"); + { + int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx); + ZSTD_freeCCtxContent(cctx); + if (!cctxInWorkspace) { + ZSTD_free(cctx, cctx->customMem); + } + } + return 0; +} + + +static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + return ZSTDMT_sizeof_CCtx(cctx->mtctx); +#else + (void)cctx; + return 0; +#endif +} + + +size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support sizeof on NULL */ + /* cctx may be in the workspace */ + return (cctx->workspace.workspace == cctx ? 
0 : sizeof(*cctx)) + + ZSTD_cwksp_sizeof(&cctx->workspace) + + ZSTD_sizeof_localDict(cctx->localDict) + + ZSTD_sizeof_mtctx(cctx); +} + +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs) +{ + return ZSTD_sizeof_CCtx(zcs); /* same object */ +} + +/* private API call, for dictBuilder only */ +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); } + +static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( + ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params cctxParams; + memset(&cctxParams, 0, sizeof(cctxParams)); + cctxParams.cParams = cParams; + cctxParams.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ + assert(!ZSTD_checkCParams(cParams)); + cctxParams.fParams.contentSizeFlag = 1; + return cctxParams; +} + +static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced( + ZSTD_customMem customMem) +{ + ZSTD_CCtx_params* params; + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + params = (ZSTD_CCtx_params*)ZSTD_calloc( + sizeof(ZSTD_CCtx_params), customMem); + if (!params) { return NULL; } + params->customMem = customMem; + params->compressionLevel = ZSTD_CLEVEL_DEFAULT; + params->fParams.contentSizeFlag = 1; + return params; +} + +ZSTD_CCtx_params* ZSTD_createCCtxParams(void) +{ + return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem); +} + +size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params) +{ + if (params == NULL) { return 0; } + ZSTD_free(params, params->customMem); + return 0; +} + +size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params) +{ + return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT); +} + +size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) { + RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); + memset(cctxParams, 0, sizeof(*cctxParams)); + cctxParams->compressionLevel = compressionLevel; + cctxParams->fParams.contentSizeFlag = 1; + return 0; +} + +size_t 
ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) +{ + RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!"); + FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); + memset(cctxParams, 0, sizeof(*cctxParams)); + assert(!ZSTD_checkCParams(params.cParams)); + cctxParams->cParams = params.cParams; + cctxParams->fParams = params.fParams; + cctxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ + return 0; +} + +/* ZSTD_assignParamsToCCtxParams() : + * params is presumed valid at this stage */ +static ZSTD_CCtx_params ZSTD_assignParamsToCCtxParams( + const ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params) +{ + ZSTD_CCtx_params ret = *cctxParams; + assert(!ZSTD_checkCParams(params->cParams)); + ret.cParams = params->cParams; + ret.fParams = params->fParams; + ret.compressionLevel = ZSTD_CLEVEL_DEFAULT; /* should not matter, as all cParams are presumed properly defined */ + return ret; +} + +ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) +{ + ZSTD_bounds bounds = { 0, 0, 0 }; + + switch(param) + { + case ZSTD_c_compressionLevel: + bounds.lowerBound = ZSTD_minCLevel(); + bounds.upperBound = ZSTD_maxCLevel(); + return bounds; + + case ZSTD_c_windowLog: + bounds.lowerBound = ZSTD_WINDOWLOG_MIN; + bounds.upperBound = ZSTD_WINDOWLOG_MAX; + return bounds; + + case ZSTD_c_hashLog: + bounds.lowerBound = ZSTD_HASHLOG_MIN; + bounds.upperBound = ZSTD_HASHLOG_MAX; + return bounds; + + case ZSTD_c_chainLog: + bounds.lowerBound = ZSTD_CHAINLOG_MIN; + bounds.upperBound = ZSTD_CHAINLOG_MAX; + return bounds; + + case ZSTD_c_searchLog: + bounds.lowerBound = ZSTD_SEARCHLOG_MIN; + bounds.upperBound = ZSTD_SEARCHLOG_MAX; + return bounds; + + case ZSTD_c_minMatch: + bounds.lowerBound = ZSTD_MINMATCH_MIN; + bounds.upperBound = ZSTD_MINMATCH_MAX; + return bounds; + + case ZSTD_c_targetLength: + bounds.lowerBound = ZSTD_TARGETLENGTH_MIN; + bounds.upperBound = 
ZSTD_TARGETLENGTH_MAX; + return bounds; + + case ZSTD_c_strategy: + bounds.lowerBound = ZSTD_STRATEGY_MIN; + bounds.upperBound = ZSTD_STRATEGY_MAX; + return bounds; + + case ZSTD_c_contentSizeFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_checksumFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_dictIDFlag: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_nbWorkers: + bounds.lowerBound = 0; +#ifdef ZSTD_MULTITHREAD + bounds.upperBound = ZSTDMT_NBWORKERS_MAX; +#else + bounds.upperBound = 0; +#endif + return bounds; + + case ZSTD_c_jobSize: + bounds.lowerBound = 0; +#ifdef ZSTD_MULTITHREAD + bounds.upperBound = ZSTDMT_JOBSIZE_MAX; +#else + bounds.upperBound = 0; +#endif + return bounds; + + case ZSTD_c_overlapLog: +#ifdef ZSTD_MULTITHREAD + bounds.lowerBound = ZSTD_OVERLAPLOG_MIN; + bounds.upperBound = ZSTD_OVERLAPLOG_MAX; +#else + bounds.lowerBound = 0; + bounds.upperBound = 0; +#endif + return bounds; + + case ZSTD_c_enableLongDistanceMatching: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_ldmHashLog: + bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN; + bounds.upperBound = ZSTD_LDM_HASHLOG_MAX; + return bounds; + + case ZSTD_c_ldmMinMatch: + bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN; + bounds.upperBound = ZSTD_LDM_MINMATCH_MAX; + return bounds; + + case ZSTD_c_ldmBucketSizeLog: + bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN; + bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX; + return bounds; + + case ZSTD_c_ldmHashRateLog: + bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN; + bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX; + return bounds; + + /* experimental parameters */ + case ZSTD_c_rsyncable: + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_forceMaxWindow : + bounds.lowerBound = 0; + bounds.upperBound = 1; + return bounds; + + case ZSTD_c_format: + ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < 
ZSTD_f_zstd1_magicless); + bounds.lowerBound = ZSTD_f_zstd1; + bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */ + return bounds; + + case ZSTD_c_forceAttachDict: + ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceCopy); + bounds.lowerBound = ZSTD_dictDefaultAttach; + bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */ + return bounds; + + case ZSTD_c_literalCompressionMode: + ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed); + bounds.lowerBound = ZSTD_lcm_auto; + bounds.upperBound = ZSTD_lcm_uncompressed; + return bounds; + + case ZSTD_c_targetCBlockSize: + bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN; + bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX; + return bounds; + + case ZSTD_c_srcSizeHint: + bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN; + bounds.upperBound = ZSTD_SRCSIZEHINT_MAX; + return bounds; + + default: + bounds.error = ERROR(parameter_unsupported); + return bounds; + } +} + +/* ZSTD_cParam_clampBounds: + * Clamps the value into the bounded range. 
+ */ +static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value) +{ + ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); + if (ZSTD_isError(bounds.error)) return bounds.error; + if (*value < bounds.lowerBound) *value = bounds.lowerBound; + if (*value > bounds.upperBound) *value = bounds.upperBound; + return 0; +} + +#define BOUNDCHECK(cParam, val) { \ + RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \ + parameter_outOfBound, "Param out of bounds"); \ +} + + +static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) +{ + switch(param) + { + case ZSTD_c_compressionLevel: + case ZSTD_c_hashLog: + case ZSTD_c_chainLog: + case ZSTD_c_searchLog: + case ZSTD_c_minMatch: + case ZSTD_c_targetLength: + case ZSTD_c_strategy: + return 1; + + case ZSTD_c_format: + case ZSTD_c_windowLog: + case ZSTD_c_contentSizeFlag: + case ZSTD_c_checksumFlag: + case ZSTD_c_dictIDFlag: + case ZSTD_c_forceMaxWindow : + case ZSTD_c_nbWorkers: + case ZSTD_c_jobSize: + case ZSTD_c_overlapLog: + case ZSTD_c_rsyncable: + case ZSTD_c_enableLongDistanceMatching: + case ZSTD_c_ldmHashLog: + case ZSTD_c_ldmMinMatch: + case ZSTD_c_ldmBucketSizeLog: + case ZSTD_c_ldmHashRateLog: + case ZSTD_c_forceAttachDict: + case ZSTD_c_literalCompressionMode: + case ZSTD_c_targetCBlockSize: + case ZSTD_c_srcSizeHint: + default: + return 0; + } +} + +size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) +{ + DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value); + if (cctx->streamStage != zcss_init) { + if (ZSTD_isUpdateAuthorized(param)) { + cctx->cParamsChanged = 1; + } else { + RETURN_ERROR(stage_wrong, "can only set params in ctx init stage"); + } } + + switch(param) + { + case ZSTD_c_nbWorkers: + RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported, + "MT not compatible with static alloc"); + break; + + case ZSTD_c_compressionLevel: + case ZSTD_c_windowLog: + case ZSTD_c_hashLog: + case ZSTD_c_chainLog: + case ZSTD_c_searchLog: + 
case ZSTD_c_minMatch: + case ZSTD_c_targetLength: + case ZSTD_c_strategy: + case ZSTD_c_ldmHashRateLog: + case ZSTD_c_format: + case ZSTD_c_contentSizeFlag: + case ZSTD_c_checksumFlag: + case ZSTD_c_dictIDFlag: + case ZSTD_c_forceMaxWindow: + case ZSTD_c_forceAttachDict: + case ZSTD_c_literalCompressionMode: + case ZSTD_c_jobSize: + case ZSTD_c_overlapLog: + case ZSTD_c_rsyncable: + case ZSTD_c_enableLongDistanceMatching: + case ZSTD_c_ldmHashLog: + case ZSTD_c_ldmMinMatch: + case ZSTD_c_ldmBucketSizeLog: + case ZSTD_c_targetCBlockSize: + case ZSTD_c_srcSizeHint: + break; + + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); + } + return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value); +} + +size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, + ZSTD_cParameter param, int value) +{ + DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value); + switch(param) + { + case ZSTD_c_format : + BOUNDCHECK(ZSTD_c_format, value); + CCtxParams->format = (ZSTD_format_e)value; + return (size_t)CCtxParams->format; + + case ZSTD_c_compressionLevel : { + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); + if (value) { /* 0 : does not change current level */ + CCtxParams->compressionLevel = value; + } + if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel; + return 0; /* return type (size_t) cannot represent negative values */ + } + + case ZSTD_c_windowLog : + if (value!=0) /* 0 => use default */ + BOUNDCHECK(ZSTD_c_windowLog, value); + CCtxParams->cParams.windowLog = (U32)value; + return CCtxParams->cParams.windowLog; + + case ZSTD_c_hashLog : + if (value!=0) /* 0 => use default */ + BOUNDCHECK(ZSTD_c_hashLog, value); + CCtxParams->cParams.hashLog = (U32)value; + return CCtxParams->cParams.hashLog; + + case ZSTD_c_chainLog : + if (value!=0) /* 0 => use default */ + BOUNDCHECK(ZSTD_c_chainLog, value); + CCtxParams->cParams.chainLog = (U32)value; + return 
CCtxParams->cParams.chainLog; + + case ZSTD_c_searchLog : + if (value!=0) /* 0 => use default */ + BOUNDCHECK(ZSTD_c_searchLog, value); + CCtxParams->cParams.searchLog = (U32)value; + return (size_t)value; + + case ZSTD_c_minMatch : + if (value!=0) /* 0 => use default */ + BOUNDCHECK(ZSTD_c_minMatch, value); + CCtxParams->cParams.minMatch = value; + return CCtxParams->cParams.minMatch; + + case ZSTD_c_targetLength : + BOUNDCHECK(ZSTD_c_targetLength, value); + CCtxParams->cParams.targetLength = value; + return CCtxParams->cParams.targetLength; + + case ZSTD_c_strategy : + if (value!=0) /* 0 => use default */ + BOUNDCHECK(ZSTD_c_strategy, value); + CCtxParams->cParams.strategy = (ZSTD_strategy)value; + return (size_t)CCtxParams->cParams.strategy; + + case ZSTD_c_contentSizeFlag : + /* Content size written in frame header _when known_ (default:1) */ + DEBUGLOG(4, "set content size flag = %u", (value!=0)); + CCtxParams->fParams.contentSizeFlag = value != 0; + return CCtxParams->fParams.contentSizeFlag; + + case ZSTD_c_checksumFlag : + /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */ + CCtxParams->fParams.checksumFlag = value != 0; + return CCtxParams->fParams.checksumFlag; + + case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */ + DEBUGLOG(4, "set dictIDFlag = %u", (value!=0)); + CCtxParams->fParams.noDictIDFlag = !value; + return !CCtxParams->fParams.noDictIDFlag; + + case ZSTD_c_forceMaxWindow : + CCtxParams->forceWindow = (value != 0); + return CCtxParams->forceWindow; + + case ZSTD_c_forceAttachDict : { + const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value; + BOUNDCHECK(ZSTD_c_forceAttachDict, pref); + CCtxParams->attachDictPref = pref; + return CCtxParams->attachDictPref; + } + + case ZSTD_c_literalCompressionMode : { + const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value; + BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); + 
CCtxParams->literalCompressionMode = lcm; + return CCtxParams->literalCompressionMode; + } + + case ZSTD_c_nbWorkers : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); + return 0; +#else + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); + CCtxParams->nbWorkers = value; + return CCtxParams->nbWorkers; +#endif + + case ZSTD_c_jobSize : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); + return 0; +#else + /* Adjust to the minimum non-default value. */ + if (value != 0 && value < ZSTDMT_JOBSIZE_MIN) + value = ZSTDMT_JOBSIZE_MIN; + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), ""); + assert(value >= 0); + CCtxParams->jobSize = value; + return CCtxParams->jobSize; +#endif + + case ZSTD_c_overlapLog : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); + return 0; +#else + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); + CCtxParams->overlapLog = value; + return CCtxParams->overlapLog; +#endif + + case ZSTD_c_rsyncable : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading"); + return 0; +#else + FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), ""); + CCtxParams->rsyncable = value; + return CCtxParams->rsyncable; +#endif + + case ZSTD_c_enableLongDistanceMatching : + CCtxParams->ldmParams.enableLdm = (value!=0); + return CCtxParams->ldmParams.enableLdm; + + case ZSTD_c_ldmHashLog : + if (value!=0) /* 0 ==> auto */ + BOUNDCHECK(ZSTD_c_ldmHashLog, value); + CCtxParams->ldmParams.hashLog = value; + return CCtxParams->ldmParams.hashLog; + + case ZSTD_c_ldmMinMatch : + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_ldmMinMatch, value); + CCtxParams->ldmParams.minMatchLength = value; + return CCtxParams->ldmParams.minMatchLength; + + case 
ZSTD_c_ldmBucketSizeLog : + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value); + CCtxParams->ldmParams.bucketSizeLog = value; + return CCtxParams->ldmParams.bucketSizeLog; + + case ZSTD_c_ldmHashRateLog : + RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, + parameter_outOfBound, "Param out of bounds!"); + CCtxParams->ldmParams.hashRateLog = value; + return CCtxParams->ldmParams.hashRateLog; + + case ZSTD_c_targetCBlockSize : + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_targetCBlockSize, value); + CCtxParams->targetCBlockSize = value; + return CCtxParams->targetCBlockSize; + + case ZSTD_c_srcSizeHint : + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_srcSizeHint, value); + CCtxParams->srcSizeHint = value; + return CCtxParams->srcSizeHint; + + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); + } +} + +size_t ZSTD_CCtx_getParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value) +{ + return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value); +} + +size_t ZSTD_CCtxParams_getParameter( + ZSTD_CCtx_params* CCtxParams, ZSTD_cParameter param, int* value) +{ + switch(param) + { + case ZSTD_c_format : + *value = CCtxParams->format; + break; + case ZSTD_c_compressionLevel : + *value = CCtxParams->compressionLevel; + break; + case ZSTD_c_windowLog : + *value = (int)CCtxParams->cParams.windowLog; + break; + case ZSTD_c_hashLog : + *value = (int)CCtxParams->cParams.hashLog; + break; + case ZSTD_c_chainLog : + *value = (int)CCtxParams->cParams.chainLog; + break; + case ZSTD_c_searchLog : + *value = CCtxParams->cParams.searchLog; + break; + case ZSTD_c_minMatch : + *value = CCtxParams->cParams.minMatch; + break; + case ZSTD_c_targetLength : + *value = CCtxParams->cParams.targetLength; + break; + case ZSTD_c_strategy : + *value = (unsigned)CCtxParams->cParams.strategy; + break; + case ZSTD_c_contentSizeFlag : + *value = CCtxParams->fParams.contentSizeFlag; + break; + case 
ZSTD_c_checksumFlag : + *value = CCtxParams->fParams.checksumFlag; + break; + case ZSTD_c_dictIDFlag : + *value = !CCtxParams->fParams.noDictIDFlag; + break; + case ZSTD_c_forceMaxWindow : + *value = CCtxParams->forceWindow; + break; + case ZSTD_c_forceAttachDict : + *value = CCtxParams->attachDictPref; + break; + case ZSTD_c_literalCompressionMode : + *value = CCtxParams->literalCompressionMode; + break; + case ZSTD_c_nbWorkers : +#ifndef ZSTD_MULTITHREAD + assert(CCtxParams->nbWorkers == 0); +#endif + *value = CCtxParams->nbWorkers; + break; + case ZSTD_c_jobSize : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); +#else + assert(CCtxParams->jobSize <= INT_MAX); + *value = (int)CCtxParams->jobSize; + break; +#endif + case ZSTD_c_overlapLog : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); +#else + *value = CCtxParams->overlapLog; + break; +#endif + case ZSTD_c_rsyncable : +#ifndef ZSTD_MULTITHREAD + RETURN_ERROR(parameter_unsupported, "not compiled with multithreading"); +#else + *value = CCtxParams->rsyncable; + break; +#endif + case ZSTD_c_enableLongDistanceMatching : + *value = CCtxParams->ldmParams.enableLdm; + break; + case ZSTD_c_ldmHashLog : + *value = CCtxParams->ldmParams.hashLog; + break; + case ZSTD_c_ldmMinMatch : + *value = CCtxParams->ldmParams.minMatchLength; + break; + case ZSTD_c_ldmBucketSizeLog : + *value = CCtxParams->ldmParams.bucketSizeLog; + break; + case ZSTD_c_ldmHashRateLog : + *value = CCtxParams->ldmParams.hashRateLog; + break; + case ZSTD_c_targetCBlockSize : + *value = (int)CCtxParams->targetCBlockSize; + break; + case ZSTD_c_srcSizeHint : + *value = (int)CCtxParams->srcSizeHint; + break; + default: RETURN_ERROR(parameter_unsupported, "unknown parameter"); + } + return 0; +} + +/** ZSTD_CCtx_setParametersUsingCCtxParams() : + * just applies `params` into `cctx` + * no action is performed, parameters are merely stored. 
+ * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx. + * This is possible even if a compression is ongoing. + * In which case, new parameters will be applied on the fly, starting with next compression job. + */ +size_t ZSTD_CCtx_setParametersUsingCCtxParams( + ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params) +{ + DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams"); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "The context is in the wrong stage!"); + RETURN_ERROR_IF(cctx->cdict, stage_wrong, + "Can't override parameters with cdict attached (some must " + "be inherited from the cdict)."); + + cctx->requestedParams = *params; + return 0; +} + +ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't set pledgedSrcSize when not in init stage."); + cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1; + return 0; +} + +/** + * Initializes the local dict using the requested parameters. + * NOTE: This does not use the pledged src size, because it may be used for more + * than one compression. + */ +static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx) +{ + ZSTD_localDict* const dl = &cctx->localDict; + ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams( + &cctx->requestedParams, ZSTD_CONTENTSIZE_UNKNOWN, dl->dictSize); + if (dl->dict == NULL) { + /* No local dictionary. */ + assert(dl->dictBuffer == NULL); + assert(dl->cdict == NULL); + assert(dl->dictSize == 0); + return 0; + } + if (dl->cdict != NULL) { + assert(cctx->cdict == dl->cdict); + /* Local dictionary already initialized. 
*/ + return 0; + } + assert(dl->dictSize > 0); + assert(cctx->cdict == NULL); + assert(cctx->prefixDict.dict == NULL); + + dl->cdict = ZSTD_createCDict_advanced( + dl->dict, + dl->dictSize, + ZSTD_dlm_byRef, + dl->dictContentType, + cParams, + cctx->customMem); + RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed"); + cctx->cdict = dl->cdict; + return 0; +} + +size_t ZSTD_CCtx_loadDictionary_advanced( + ZSTD_CCtx* cctx, const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType) +{ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't load a dictionary when ctx is not in init stage."); + RETURN_ERROR_IF(cctx->staticSize, memory_allocation, + "no malloc for static CCtx"); + DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize); + ZSTD_clearAllDicts(cctx); /* in case one already exists */ + if (dict == NULL || dictSize == 0) /* no dictionary mode */ + return 0; + if (dictLoadMethod == ZSTD_dlm_byRef) { + cctx->localDict.dict = dict; + } else { + void* dictBuffer = ZSTD_malloc(dictSize, cctx->customMem); + RETURN_ERROR_IF(!dictBuffer, memory_allocation, "NULL pointer!"); + memcpy(dictBuffer, dict, dictSize); + cctx->localDict.dictBuffer = dictBuffer; + cctx->localDict.dict = dictBuffer; + } + cctx->localDict.dictSize = dictSize; + cctx->localDict.dictContentType = dictContentType; + return 0; +} + +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( + ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + return ZSTD_CCtx_loadDictionary_advanced( + cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); +} + +ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + return ZSTD_CCtx_loadDictionary_advanced( + cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); +} + + +size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, 
stage_wrong, + "Can't ref a dict when ctx not in init stage."); + /* Free the existing local cdict (if any) to save memory. */ + ZSTD_clearAllDicts(cctx); + cctx->cdict = cdict; + return 0; +} + +size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize) +{ + return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent); +} + +size_t ZSTD_CCtx_refPrefix_advanced( + ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) +{ + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't ref a prefix when ctx not in init stage."); + ZSTD_clearAllDicts(cctx); + if (prefix != NULL && prefixSize > 0) { + cctx->prefixDict.dict = prefix; + cctx->prefixDict.dictSize = prefixSize; + cctx->prefixDict.dictContentType = dictContentType; + } + return 0; +} + +/*! ZSTD_CCtx_reset() : + * Also dumps dictionary */ +size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset) +{ + if ( (reset == ZSTD_reset_session_only) + || (reset == ZSTD_reset_session_and_parameters) ) { + cctx->streamStage = zcss_init; + cctx->pledgedSrcSizePlusOne = 0; + } + if ( (reset == ZSTD_reset_parameters) + || (reset == ZSTD_reset_session_and_parameters) ) { + RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, + "Can't reset parameters only when not in init stage."); + ZSTD_clearAllDicts(cctx); + return ZSTD_CCtxParams_reset(&cctx->requestedParams); + } + return 0; +} + + +/** ZSTD_checkCParams() : + control CParam values remain within authorized range. 
+ @return : 0, or an error code if one value is beyond authorized range */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) +{ + BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog); + BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog); + BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog); + BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog); + BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch); + BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength); + BOUNDCHECK(ZSTD_c_strategy, cParams.strategy); + return 0; +} + +/** ZSTD_clampCParams() : + * make CParam values within valid range. + * @return : valid CParams */ +static ZSTD_compressionParameters +ZSTD_clampCParams(ZSTD_compressionParameters cParams) +{ +# define CLAMP_TYPE(cParam, val, type) { \ + ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \ + if ((int)valbounds.upperBound) val=(type)bounds.upperBound; \ + } +# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned) + CLAMP(ZSTD_c_windowLog, cParams.windowLog); + CLAMP(ZSTD_c_chainLog, cParams.chainLog); + CLAMP(ZSTD_c_hashLog, cParams.hashLog); + CLAMP(ZSTD_c_searchLog, cParams.searchLog); + CLAMP(ZSTD_c_minMatch, cParams.minMatch); + CLAMP(ZSTD_c_targetLength,cParams.targetLength); + CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy); + return cParams; +} + +/** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) +{ + U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); + return hashLog - btScale; +} + +/** ZSTD_adjustCParams_internal() : + * optimize `cPar` for a specified input (`srcSize` and `dictSize`). + * mostly downsize to reduce memory consumption and initialization latency. + * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known. + * note : `srcSize==0` means 0! + * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). 
*/ +static ZSTD_compressionParameters +ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, + unsigned long long srcSize, + size_t dictSize) +{ + static const U64 minSrcSize = 513; /* (1<<9) + 1 */ + static const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1); + assert(ZSTD_checkCParams(cPar)==0); + + if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) + srcSize = minSrcSize; + + /* resize windowLog if input is small enough, to use less memory */ + if ( (srcSize < maxWindowResize) + && (dictSize < maxWindowResize) ) { + U32 const tSize = (U32)(srcSize + dictSize); + static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN; + U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN : + ZSTD_highbit32(tSize-1) + 1; + if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; + } + if (cPar.hashLog > cPar.windowLog+1) cPar.hashLog = cPar.windowLog+1; + { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cycleLog > cPar.windowLog) + cPar.chainLog -= (cycleLog - cPar.windowLog); + } + + if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) + cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */ + + return cPar; +} + +ZSTD_compressionParameters +ZSTD_adjustCParams(ZSTD_compressionParameters cPar, + unsigned long long srcSize, + size_t dictSize) +{ + cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */ + if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize); +} + +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize); +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize); + +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize) +{ + ZSTD_compressionParameters cParams; + if 
(srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) { + srcSizeHint = CCtxParams->srcSizeHint; + } + cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize); + if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; + if (CCtxParams->cParams.windowLog) cParams.windowLog = CCtxParams->cParams.windowLog; + if (CCtxParams->cParams.hashLog) cParams.hashLog = CCtxParams->cParams.hashLog; + if (CCtxParams->cParams.chainLog) cParams.chainLog = CCtxParams->cParams.chainLog; + if (CCtxParams->cParams.searchLog) cParams.searchLog = CCtxParams->cParams.searchLog; + if (CCtxParams->cParams.minMatch) cParams.minMatch = CCtxParams->cParams.minMatch; + if (CCtxParams->cParams.targetLength) cParams.targetLength = CCtxParams->cParams.targetLength; + if (CCtxParams->cParams.strategy) cParams.strategy = CCtxParams->cParams.strategy; + assert(!ZSTD_checkCParams(cParams)); + /* srcSizeHint == 0 means 0 */ + return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize); +} + +static size_t +ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, + const U32 forCCtx) +{ + size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); + size_t const hSize = ((size_t)1) << cParams->hashLog; + U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; + size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0; + /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't + * surrounded by redzones in ASAN. */ + size_t const tableSpace = chainSize * sizeof(U32) + + hSize * sizeof(U32) + + h3Size * sizeof(U32); + size_t const optPotentialSpace = + ZSTD_cwksp_alloc_size((MaxML+1) * sizeof(U32)) + + ZSTD_cwksp_alloc_size((MaxLL+1) * sizeof(U32)) + + ZSTD_cwksp_alloc_size((MaxOff+1) * sizeof(U32)) + + ZSTD_cwksp_alloc_size((1<strategy >= ZSTD_btopt)) + ? 
optPotentialSpace + : 0; + DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", + (U32)chainSize, (U32)hSize, (U32)h3Size); + return tableSpace + optSpace; +} + +size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) +{ + RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); + { ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + U32 const divider = (cParams.minMatch==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef)) + + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); + size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE); + size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); + size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 1); + + size_t const ldmSpace = ZSTD_ldm_getTableSize(params->ldmParams); + size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize) * sizeof(rawSeq)); + + /* estimateCCtxSize is for one-shot compression. So no buffers should + * be needed. However, we still allocate two 0-sized buffers, which can + * take space under ASAN. 
*/ + size_t const bufferSpace = ZSTD_cwksp_alloc_size(0) + + ZSTD_cwksp_alloc_size(0); + + size_t const cctxSpace = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)); + + size_t const neededSpace = + cctxSpace + + entropySpace + + blockStateSpace + + ldmSpace + + ldmSeqSpace + + matchStateSize + + tokenSpace + + bufferSpace; + + DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace); + return neededSpace; + } +} + +size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); + return ZSTD_estimateCCtxSize_usingCCtxParams(¶ms); +} + +static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel) +{ + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); + return ZSTD_estimateCCtxSize_usingCParams(cParams); +} + +size_t ZSTD_estimateCCtxSize(int compressionLevel) +{ + int level; + size_t memBudget = 0; + for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { + size_t const newMB = ZSTD_estimateCCtxSize_internal(level); + if (newMB > memBudget) memBudget = newMB; + } + return memBudget; +} + +size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) +{ + RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); + { ZSTD_compressionParameters const cParams = + ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0); + size_t const CCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(params); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, (size_t)1 << cParams.windowLog); + size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize; + size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; + size_t const streamingSize = ZSTD_cwksp_alloc_size(inBuffSize) + + ZSTD_cwksp_alloc_size(outBuffSize); + + return CCtxSize + streamingSize; + } +} + +size_t 
ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) +{ + ZSTD_CCtx_params const params = ZSTD_makeCCtxParamsFromCParams(cParams); + return ZSTD_estimateCStreamSize_usingCCtxParams(¶ms); +} + +static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel) +{ + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); + return ZSTD_estimateCStreamSize_usingCParams(cParams); +} + +size_t ZSTD_estimateCStreamSize(int compressionLevel) +{ + int level; + size_t memBudget = 0; + for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) { + size_t const newMB = ZSTD_estimateCStreamSize_internal(level); + if (newMB > memBudget) memBudget = newMB; + } + return memBudget; +} + +/* ZSTD_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads (non-blocking mode). + */ +ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers > 0) { + return ZSTDMT_getFrameProgression(cctx->mtctx); + } +#endif + { ZSTD_frameProgression fp; + size_t const buffered = (cctx->inBuff == NULL) ? 0 : + cctx->inBuffPos - cctx->inToCompress; + if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress); + assert(buffered <= ZSTD_BLOCKSIZE_MAX); + fp.ingested = cctx->consumedSrcSize + buffered; + fp.consumed = cctx->consumedSrcSize; + fp.produced = cctx->producedCSize; + fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */ + fp.currentJobID = 0; + fp.nbActiveWorkers = 0; + return fp; +} } + +/*! ZSTD_toFlushNow() + * Only useful for multithreading scenarios currently (nbWorkers >= 1). 
+ */ +size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers > 0) { + return ZSTDMT_toFlushNow(cctx->mtctx); + } +#endif + (void)cctx; + return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */ +} + +static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1, + ZSTD_compressionParameters cParams2) +{ + (void)cParams1; + (void)cParams2; + assert(cParams1.windowLog == cParams2.windowLog); + assert(cParams1.chainLog == cParams2.chainLog); + assert(cParams1.hashLog == cParams2.hashLog); + assert(cParams1.searchLog == cParams2.searchLog); + assert(cParams1.minMatch == cParams2.minMatch); + assert(cParams1.targetLength == cParams2.targetLength); + assert(cParams1.strategy == cParams2.strategy); +} + +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs) +{ + int i; + for (i = 0; i < ZSTD_REP_NUM; ++i) + bs->rep[i] = repStartValue[i]; + bs->entropy.huf.repeatMode = HUF_repeat_none; + bs->entropy.fse.offcode_repeatMode = FSE_repeat_none; + bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none; + bs->entropy.fse.litlength_repeatMode = FSE_repeat_none; +} + +/*! ZSTD_invalidateMatchState() + * Invalidate all the matches in the match finder tables. + * Requires nextSrc and base to be set (can be NULL). + */ +static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms) +{ + ZSTD_window_clear(&ms->window); + + ms->nextToUpdate = ms->window.dictLimit; + ms->loadedDictEnd = 0; + ms->opt.litLengthSum = 0; /* force reset of btopt stats */ + ms->dictMatchState = NULL; +} + +/** + * Indicates whether this compression proceeds directly from user-provided + * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or + * whether the context needs to buffer the input/output (ZSTDb_buffered). 
+ */ +typedef enum { + ZSTDb_not_buffered, + ZSTDb_buffered +} ZSTD_buffered_policy_e; + +/** + * Controls, for this matchState reset, whether the tables need to be cleared / + * prepared for the coming compression (ZSTDcrp_makeClean), or whether the + * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a + * subsequent operation will overwrite the table space anyways (e.g., copying + * the matchState contents in from a CDict). + */ +typedef enum { + ZSTDcrp_makeClean, + ZSTDcrp_leaveDirty +} ZSTD_compResetPolicy_e; + +/** + * Controls, for this matchState reset, whether indexing can continue where it + * left off (ZSTDirp_continue), or whether it needs to be restarted from zero + * (ZSTDirp_reset). + */ +typedef enum { + ZSTDirp_continue, + ZSTDirp_reset +} ZSTD_indexResetPolicy_e; + +typedef enum { + ZSTD_resetTarget_CDict, + ZSTD_resetTarget_CCtx +} ZSTD_resetTarget_e; + +static size_t +ZSTD_reset_matchState(ZSTD_matchState_t* ms, + ZSTD_cwksp* ws, + const ZSTD_compressionParameters* cParams, + const ZSTD_compResetPolicy_e crp, + const ZSTD_indexResetPolicy_e forceResetIndex, + const ZSTD_resetTarget_e forWho) +{ + size_t const chainSize = (cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cParams->chainLog); + size_t const hSize = ((size_t)1) << cParams->hashLog; + U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0; + size_t const h3Size = hashLog3 ? 
((size_t)1) << hashLog3 : 0; + + DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset); + if (forceResetIndex == ZSTDirp_reset) { + ZSTD_window_init(&ms->window); + ZSTD_cwksp_mark_tables_dirty(ws); + } + + ms->hashLog3 = hashLog3; + + ZSTD_invalidateMatchState(ms); + + assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */ + + ZSTD_cwksp_clear_tables(ws); + + DEBUGLOG(5, "reserving table space"); + /* table Space */ + ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32)); + ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32)); + ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32)); + RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, + "failed a workspace allocation in ZSTD_reset_matchState"); + + DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty); + if (crp!=ZSTDcrp_leaveDirty) { + /* reset tables only */ + ZSTD_cwksp_clean_tables(ws); + } + + /* opt parser space */ + if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) { + DEBUGLOG(4, "reserving optimal parser space"); + ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned)); + ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned)); + ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned)); + ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t)); + ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t)); + } + + ms->cParams = *cParams; + + RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation, + "failed a workspace allocation in ZSTD_reset_matchState"); + + return 0; +} + +/* ZSTD_indexTooCloseToMax() : + * minor optimization : prefer memset() rather than 
reduceIndex() + * which is measurably slow in some circumstances (reported for Visual Studio). + * Works when re-using a context for a lot of smallish inputs : + * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN, + * memset() will be triggered before reduceIndex(). + */ +#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB) +static int ZSTD_indexTooCloseToMax(ZSTD_window_t w) +{ + return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN); +} + +/*! ZSTD_resetCCtx_internal() : + note : `params` are assumed fully validated at this stage */ +static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, + ZSTD_CCtx_params params, + U64 const pledgedSrcSize, + ZSTD_compResetPolicy_e const crp, + ZSTD_buffered_policy_e const zbuff) +{ + ZSTD_cwksp* const ws = &zc->workspace; + DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u", + (U32)pledgedSrcSize, params.cParams.windowLog); + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + + zc->isFirstBlock = 1; + + if (params.ldmParams.enableLdm) { + /* Adjust long distance matching parameters */ + ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); + assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); + assert(params.ldmParams.hashRateLog < 32); + zc->ldmState.hashPower = ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); + } + + { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params.cParams.windowLog), pledgedSrcSize)); + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); + U32 const divider = (params.cParams.minMatch==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize) + + ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(seqDef)) + + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE)); + size_t const buffOutSize = (zbuff==ZSTDb_buffered) ? ZSTD_compressBound(blockSize)+1 : 0; + size_t const buffInSize = (zbuff==ZSTDb_buffered) ? 
windowSize + blockSize : 0; + size_t const matchStateSize = ZSTD_sizeof_matchState(¶ms.cParams, /* forCCtx */ 1); + size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params.ldmParams, blockSize); + + ZSTD_indexResetPolicy_e needsIndexReset = zc->initialized ? ZSTDirp_continue : ZSTDirp_reset; + + if (ZSTD_indexTooCloseToMax(zc->blockState.matchState.window)) { + needsIndexReset = ZSTDirp_reset; + } + + if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0); + + /* Check if workspace is large enough, alloc a new one if needed */ + { size_t const cctxSpace = zc->staticSize ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0; + size_t const entropySpace = ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE); + size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t)); + size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize) + ZSTD_cwksp_alloc_size(buffOutSize); + size_t const ldmSpace = ZSTD_ldm_getTableSize(params.ldmParams); + size_t const ldmSeqSpace = ZSTD_cwksp_alloc_size(maxNbLdmSeq * sizeof(rawSeq)); + + size_t const neededSpace = + cctxSpace + + entropySpace + + blockStateSpace + + ldmSpace + + ldmSeqSpace + + matchStateSize + + tokenSpace + + bufferSpace; + + int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace; + int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace); + + DEBUGLOG(4, "Need %zuKB workspace, including %zuKB for match state, and %zuKB for buffers", + neededSpace>>10, matchStateSize>>10, bufferSpace>>10); + DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize); + + if (workspaceTooSmall || workspaceWasteful) { + DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB", + ZSTD_cwksp_sizeof(ws) >> 10, + neededSpace >> 10); + + RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize"); + + needsIndexReset = ZSTDirp_reset; + + ZSTD_cwksp_free(ws, zc->customMem); + FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), ""); + + DEBUGLOG(5, 
"reserving object space"); + /* Statically sized space. + * entropyWorkspace never moves, + * though prev/next block swap places */ + assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t))); + zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); + RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock"); + zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); + RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); + zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, HUF_WORKSPACE_SIZE); + RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); + } } + + ZSTD_cwksp_clear(ws); + + /* init params */ + zc->appliedParams = params; + zc->blockState.matchState.cParams = params.cParams; + zc->pledgedSrcSizePlusOne = pledgedSrcSize+1; + zc->consumedSrcSize = 0; + zc->producedCSize = 0; + if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN) + zc->appliedParams.fParams.contentSizeFlag = 0; + DEBUGLOG(4, "pledged content size : %u ; flag : %u", + (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag); + zc->blockSize = blockSize; + + XXH64_reset(&zc->xxhState, 0); + zc->stage = ZSTDcs_init; + zc->dictID = 0; + + ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock); + + /* ZSTD_wildcopy() is used to copy into the literals buffer, + * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes. 
+ */ + zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH); + zc->seqStore.maxNbLit = blockSize; + + /* buffers */ + zc->inBuffSize = buffInSize; + zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize); + zc->outBuffSize = buffOutSize; + zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); + + /* ldm bucketOffsets table */ + if (params.ldmParams.enableLdm) { + /* TODO: avoid memset? */ + size_t const ldmBucketSize = + ((size_t)1) << (params.ldmParams.hashLog - + params.ldmParams.bucketSizeLog); + zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, ldmBucketSize); + memset(zc->ldmState.bucketOffsets, 0, ldmBucketSize); + } + + /* sequences storage */ + ZSTD_referenceExternalSequences(zc, NULL, 0); + zc->seqStore.maxNbSeq = maxNbSeq; + zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); + zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); + zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE)); + zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef)); + + FORWARD_IF_ERROR(ZSTD_reset_matchState( + &zc->blockState.matchState, + ws, + ¶ms.cParams, + crp, + needsIndexReset, + ZSTD_resetTarget_CCtx), ""); + + /* ldm hash table */ + if (params.ldmParams.enableLdm) { + /* TODO: avoid memset? 
*/ + size_t const ldmHSize = ((size_t)1) << params.ldmParams.hashLog; + zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); + memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t)); + zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq)); + zc->maxNbLdmSequences = maxNbLdmSeq; + + ZSTD_window_init(&zc->ldmState.window); + ZSTD_window_clear(&zc->ldmState.window); + zc->ldmState.loadedDictEnd = 0; + } + + DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); + zc->initialized = 1; + + return 0; + } +} + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { + int i; + for (i=0; iblockState.prevCBlock->rep[i] = 0; + assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window)); +} + +/* These are the approximate sizes for each strategy past which copying the + * dictionary tables into the working context is faster than using them + * in-place. 
 */
static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
    8 KB,  /* unused */
    8 KB,  /* ZSTD_fast */
    16 KB, /* ZSTD_dfast */
    32 KB, /* ZSTD_greedy */
    32 KB, /* ZSTD_lazy */
    32 KB, /* ZSTD_lazy2 */
    32 KB, /* ZSTD_btlazy2 */
    32 KB, /* ZSTD_btopt */
    8 KB,  /* ZSTD_btultra */
    8 KB   /* ZSTD_btultra2 */
};

/* Decide whether the CDict should be referenced in-place (attached) rather
 * than copied: small/unknown pledged sizes favor attaching, and the user
 * preference (attachDictPref) can force either way. */
static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
                                 const ZSTD_CCtx_params* params,
                                 U64 pledgedSrcSize)
{
    size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
    return ( pledgedSrcSize <= cutoff
          || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
          || params->attachDictPref == ZSTD_dictForceAttach )
        && params->attachDictPref != ZSTD_dictForceCopy
        && !params->forceWindow; /* dictMatchState isn't correctly
                                  * handled in _enforceMaxDist */
}

/* Reset `cctx` for a new frame, referencing `cdict`'s matchState in place
 * (no table copy). The working window is positioned so dict indices are
 * never negative in the working context's index space. */
static size_t
ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
                        const ZSTD_CDict* cdict,
                        ZSTD_CCtx_params params,
                        U64 pledgedSrcSize,
                        ZSTD_buffered_policy_e zbuff)
{
    {   const ZSTD_compressionParameters* const cdict_cParams = &cdict->matchState.cParams;
        unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Resize working context table params for input only, since the dict
         * has its own tables. */
        /* pledgeSrcSize == 0 means 0! */
        params.cParams = ZSTD_adjustCParams_internal(*cdict_cParams, pledgedSrcSize, 0);
        params.cParams.windowLog = windowLog;
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                                 ZSTDcrp_makeClean, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
    }

    {   const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
                                  - cdict->matchState.window.base);
        const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
        if (cdictLen == 0) {
            /* don't even attach dictionaries with no contents */
            DEBUGLOG(4, "skipping attaching empty dictionary");
        } else {
            DEBUGLOG(4, "attaching dictionary into context");
            cctx->blockState.matchState.dictMatchState = &cdict->matchState;

            /* prep working match state so dict matches never have negative indices
             * when they are translated to the working context's index space. */
            if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
                cctx->blockState.matchState.window.nextSrc =
                    cctx->blockState.matchState.window.base + cdictEnd;
                ZSTD_window_clear(&cctx->blockState.matchState.window);
            }
            /* loadedDictEnd is expressed within the referential of the active context */
            cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
    }   }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}

/* Reset `cctx` for a new frame, copying `cdict`'s hash/chain tables and
 * entropy state into the working context (used when attaching would be
 * slower — see ZSTD_shouldAttachDict). */
static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            ZSTD_CCtx_params params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;

    DEBUGLOG(4, "copying dictionary into context");

    {   unsigned const windowLog = params.cParams.windowLog;
        assert(windowLog != 0);
        /* Copy only compression parameters related to tables. */
        params.cParams = *cdict_cParams;
        params.cParams.windowLog = windowLog;
        FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
                                                 ZSTDcrp_leaveDirty, zbuff), "");
        assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
        assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
        assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
    }

    ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);

    /* copy tables */
    {   size_t const chainSize = (cdict_cParams->strategy == ZSTD_fast) ? 0 : ((size_t)1 << cdict_cParams->chainLog);
        size_t const hSize = (size_t)1 << cdict_cParams->hashLog;

        memcpy(cctx->blockState.matchState.hashTable,
               cdict->matchState.hashTable,
               hSize * sizeof(U32));
        memcpy(cctx->blockState.matchState.chainTable,
               cdict->matchState.chainTable,
               chainSize * sizeof(U32));
    }

    /* Zero the hashTable3, since the cdict never fills it */
    {   int const h3log = cctx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
        assert(cdict->matchState.hashLog3 == 0);
        memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&cctx->workspace);

    /* copy dictionary offsets */
    {   ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
        ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }

    cctx->dictID = cdict->dictID;

    /* copy block state */
    memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));

    return 0;
}

/* We have a choice between copying the dictionary context into the working
 * context, or referencing the dictionary context from the working context
 * in-place.
 */
static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
                            const ZSTD_CDict* cdict,
                            const ZSTD_CCtx_params* params,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{

    DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
                (unsigned)pledgedSrcSize);

    /* dispatch to attach-in-place vs copy strategy, per ZSTD_shouldAttachDict */
    if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
        return ZSTD_resetCCtx_byAttachingCDict(
            cctx, cdict, *params, pledgedSrcSize, zbuff);
    } else {
        return ZSTD_resetCCtx_byCopyingCDict(
            cctx, cdict, *params, pledgedSrcSize, zbuff);
    }
}

/*! ZSTD_copyCCtx_internal() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  The "context", in this case, refers to the hash and chain tables,
 *  entropy tables, and dictionary references.
 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
 * @return : 0, or an error code */
static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
                            const ZSTD_CCtx* srcCCtx,
                            ZSTD_frameParameters fParams,
                            U64 pledgedSrcSize,
                            ZSTD_buffered_policy_e zbuff)
{
    DEBUGLOG(5, "ZSTD_copyCCtx_internal");
    RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
                    "Can't copy a ctx that's not in init stage.");

    memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
    {   ZSTD_CCtx_params params = dstCCtx->requestedParams;
        /* Copy only compression parameters related to tables. */
        params.cParams = srcCCtx->appliedParams.cParams;
        params.fParams = fParams;
        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize,
                                ZSTDcrp_leaveDirty, zbuff);
        assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
        assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
        assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
        assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
        assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
    }

    ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);

    /* copy tables */
    {   size_t const chainSize = (srcCCtx->appliedParams.cParams.strategy == ZSTD_fast) ? 0 : ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog);
        size_t const hSize =  (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
        int const h3log = srcCCtx->blockState.matchState.hashLog3;
        size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;

        memcpy(dstCCtx->blockState.matchState.hashTable,
               srcCCtx->blockState.matchState.hashTable,
               hSize * sizeof(U32));
        memcpy(dstCCtx->blockState.matchState.chainTable,
               srcCCtx->blockState.matchState.chainTable,
               chainSize * sizeof(U32));
        memcpy(dstCCtx->blockState.matchState.hashTable3,
               srcCCtx->blockState.matchState.hashTable3,
               h3Size * sizeof(U32));
    }

    ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);

    /* copy dictionary offsets */
    {
        const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
        ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
        dstMatchState->window = srcMatchState->window;
        dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
        dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
    }
    dstCCtx->dictID = srcCCtx->dictID;

    /* copy block state */
    memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));

    return 0;
}

/*! ZSTD_copyCCtx() :
 *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
 *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
 *  pledgedSrcSize==0 means "unknown".
*   @return : 0, or an error code */
size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
{
    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    ZSTD_buffered_policy_e const zbuff = (ZSTD_buffered_policy_e)(srcCCtx->inBuffSize>0);
    ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
    if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
    fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);

    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
                                  fParams, pledgedSrcSize,
                                  zbuff);
}


#define ZSTD_ROWSIZE 16
/*! ZSTD_reduceTable() :
 *  reduce table indexes by `reducerValue`, or squash to zero.
+ * PreserveMark preserves "unsorted mark" for btlazy2 strategy. + * It must be set to a clear 0/1 value, to remove branch during inlining. + * Presume table size is a multiple of ZSTD_ROWSIZE + * to help auto-vectorization */ +FORCE_INLINE_TEMPLATE void +ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark) +{ + int const nbRows = (int)size / ZSTD_ROWSIZE; + int cellNb = 0; + int rowNb; + assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ + assert(size < (1U<<31)); /* can be casted to int */ + +#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) + /* To validate that the table re-use logic is sound, and that we don't + * access table space that we haven't cleaned, we re-"poison" the table + * space every time we mark it dirty. + * + * This function however is intended to operate on those dirty tables and + * re-clean them. So when this function is used correctly, we can unpoison + * the memory it operated on. This introduces a blind spot though, since + * if we now try to operate on __actually__ poisoned memory, we will not + * detect that. 
*/ + __msan_unpoison(table, size * sizeof(U32)); +#endif + + for (rowNb=0 ; rowNb < nbRows ; rowNb++) { + int column; + for (column=0; columncParams.hashLog; + ZSTD_reduceTable(ms->hashTable, hSize, reducerValue); + } + + if (params->cParams.strategy != ZSTD_fast) { + U32 const chainSize = (U32)1 << params->cParams.chainLog; + if (params->cParams.strategy == ZSTD_btlazy2) + ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue); + else + ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue); + } + + if (ms->hashLog3) { + U32 const h3Size = (U32)1 << ms->hashLog3; + ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue); + } +} + + +/*-******************************************************* +* Block entropic compression +*********************************************************/ + +/* See doc/zstd_compression_format.md for detailed format description */ + +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +{ + const seqDef* const sequences = seqStorePtr->sequencesStart; + BYTE* const llCodeTable = seqStorePtr->llCode; + BYTE* const ofCodeTable = seqStorePtr->ofCode; + BYTE* const mlCodeTable = seqStorePtr->mlCode; + U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + U32 u; + assert(nbSeq <= seqStorePtr->maxNbSeq); + for (u=0; ulongLengthID==1) + llCodeTable[seqStorePtr->longLengthPos] = MaxLL; + if (seqStorePtr->longLengthID==2) + mlCodeTable[seqStorePtr->longLengthPos] = MaxML; +} + +/* ZSTD_useTargetCBlockSize(): + * Returns if target compressed block size param is being used. + * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize. + * Returns 1 if true, 0 otherwise. 
*/ +static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams) +{ + DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize); + return (cctxParams->targetCBlockSize != 0); +} + +/* ZSTD_compressSequences_internal(): + * actually compresses both literals and sequences */ +MEM_STATIC size_t +ZSTD_compressSequences_internal(seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + void* entropyWorkspace, size_t entropyWkspSize, + const int bmi2) +{ + const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; + ZSTD_strategy const strategy = cctxParams->cParams.strategy; + unsigned count[MaxSeq+1]; + FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable; + FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable; + FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable; + U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ + const seqDef* const sequences = seqStorePtr->sequencesStart; + const BYTE* const ofCodeTable = seqStorePtr->ofCode; + const BYTE* const llCodeTable = seqStorePtr->llCode; + const BYTE* const mlCodeTable = seqStorePtr->mlCode; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + BYTE* seqHead; + BYTE* lastNCount = NULL; + + DEBUGLOG(5, "ZSTD_compressSequences_internal (nbSeq=%zu)", nbSeq); + ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<litStart; + size_t const litSize = (size_t)(seqStorePtr->lit - literals); + size_t const cSize = ZSTD_compressLiterals( + &prevEntropy->huf, &nextEntropy->huf, + cctxParams->cParams.strategy, + ZSTD_disableLiteralsCompression(cctxParams), + op, dstCapacity, + literals, litSize, + entropyWorkspace, entropyWkspSize, + bmi2); + FORWARD_IF_ERROR(cSize, 
"ZSTD_compressLiterals failed"); + assert(cSize <= dstCapacity); + op += cSize; + } + + /* Sequences Header */ + RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, + dstSize_tooSmall, "Can't fit seq hdr in output buf!"); + if (nbSeq < 128) { + *op++ = (BYTE)nbSeq; + } else if (nbSeq < LONGNBSEQ) { + op[0] = (BYTE)((nbSeq>>8) + 0x80); + op[1] = (BYTE)nbSeq; + op+=2; + } else { + op[0]=0xFF; + MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)); + op+=3; + } + assert(op <= oend); + if (nbSeq==0) { + /* Copy the old tables over as if we repeated them */ + memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse)); + return (size_t)(op - ostart); + } + + /* seqHead : flags for FSE encoding type */ + seqHead = op++; + assert(op <= oend); + + /* convert length/distances into codes */ + ZSTD_seqToCodes(seqStorePtr); + /* build CTable for Literal Lengths */ + { unsigned max = MaxLL; + size_t const mostFrequent = HIST_countFast_wksp(count, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + DEBUGLOG(5, "Building LL table"); + nextEntropy->fse.litlength_repeatMode = prevEntropy->fse.litlength_repeatMode; + LLtype = ZSTD_selectEncodingType(&nextEntropy->fse.litlength_repeatMode, + count, max, mostFrequent, nbSeq, + LLFSELog, prevEntropy->fse.litlengthCTable, + LL_defaultNorm, LL_defaultNormLog, + ZSTD_defaultAllowed, strategy); + assert(set_basic < set_compressed && set_rle < set_compressed); + assert(!(LLtype < set_compressed && nextEntropy->fse.litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), + CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, + count, max, llCodeTable, nbSeq, + LL_defaultNorm, LL_defaultNormLog, MaxLL, + prevEntropy->fse.litlengthCTable, + sizeof(prevEntropy->fse.litlengthCTable), + entropyWorkspace, entropyWkspSize); + FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed"); + if (LLtype == 
set_compressed) + lastNCount = op; + op += countSize; + assert(op <= oend); + } } + /* build CTable for Offsets */ + { unsigned max = MaxOff; + size_t const mostFrequent = HIST_countFast_wksp( + count, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ + ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + DEBUGLOG(5, "Building OF table"); + nextEntropy->fse.offcode_repeatMode = prevEntropy->fse.offcode_repeatMode; + Offtype = ZSTD_selectEncodingType(&nextEntropy->fse.offcode_repeatMode, + count, max, mostFrequent, nbSeq, + OffFSELog, prevEntropy->fse.offcodeCTable, + OF_defaultNorm, OF_defaultNormLog, + defaultPolicy, strategy); + assert(!(Offtype < set_compressed && nextEntropy->fse.offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), + CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, + count, max, ofCodeTable, nbSeq, + OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + prevEntropy->fse.offcodeCTable, + sizeof(prevEntropy->fse.offcodeCTable), + entropyWorkspace, entropyWkspSize); + FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed"); + if (Offtype == set_compressed) + lastNCount = op; + op += countSize; + assert(op <= oend); + } } + /* build CTable for MatchLengths */ + { unsigned max = MaxML; + size_t const mostFrequent = HIST_countFast_wksp( + count, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */ + DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); + nextEntropy->fse.matchlength_repeatMode = prevEntropy->fse.matchlength_repeatMode; + MLtype = ZSTD_selectEncodingType(&nextEntropy->fse.matchlength_repeatMode, + count, max, mostFrequent, nbSeq, + MLFSELog, prevEntropy->fse.matchlengthCTable, + 
ML_defaultNorm, ML_defaultNormLog, + ZSTD_defaultAllowed, strategy); + assert(!(MLtype < set_compressed && nextEntropy->fse.matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable( + op, (size_t)(oend - op), + CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, + count, max, mlCodeTable, nbSeq, + ML_defaultNorm, ML_defaultNormLog, MaxML, + prevEntropy->fse.matchlengthCTable, + sizeof(prevEntropy->fse.matchlengthCTable), + entropyWorkspace, entropyWkspSize); + FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed"); + if (MLtype == set_compressed) + lastNCount = op; + op += countSize; + assert(op <= oend); + } } + + *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); + + { size_t const bitstreamSize = ZSTD_encodeSequences( + op, (size_t)(oend - op), + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, + longOffsets, bmi2); + FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); + op += bitstreamSize; + assert(op <= oend); + /* zstd versions <= 1.3.4 mistakenly report corruption when + * FSE_readNCount() receives a buffer < 4 bytes. + * Fixed by https://github.com/facebook/zstd/pull/1146. + * This can happen when the last set_compressed table present is 2 + * bytes and the bitstream is only one byte. + * In this exceedingly rare case, we will simply emit an uncompressed + * block, since it isn't worth optimizing. 
+ */ + if (lastNCount && (op - lastNCount) < 4) { + /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ + assert(op - lastNCount == 3); + DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " + "emitting an uncompressed block."); + return 0; + } + } + + DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart)); + return (size_t)(op - ostart); +} + +MEM_STATIC size_t +ZSTD_compressSequences(seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + size_t srcSize, + void* entropyWorkspace, size_t entropyWkspSize, + int bmi2) +{ + size_t const cSize = ZSTD_compressSequences_internal( + seqStorePtr, prevEntropy, nextEntropy, cctxParams, + dst, dstCapacity, + entropyWorkspace, entropyWkspSize, bmi2); + if (cSize == 0) return 0; + /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block. + * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block. 
 */
    if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity))
        return 0;  /* block not compressed */
    FORWARD_IF_ERROR(cSize, "ZSTD_compressSequences_internal failed");

    /* Check compressibility */
    {   size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
        if (cSize >= maxCSize) return 0;  /* block not compressed */
    }

    return cSize;
}

/* ZSTD_selectBlockCompressor() :
 * Not static, but internal use only (used by long distance matcher)
 * assumption : strat is a valid strategy */
ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode)
{
    /* dispatch table indexed by [dictMode][strategy] */
    static const ZSTD_blockCompressor blockCompressor[3][ZSTD_STRATEGY_MAX+1] = {
        { ZSTD_compressBlock_fast  /* default for 0 */,
          ZSTD_compressBlock_fast,
          ZSTD_compressBlock_doubleFast,
          ZSTD_compressBlock_greedy,
          ZSTD_compressBlock_lazy,
          ZSTD_compressBlock_lazy2,
          ZSTD_compressBlock_btlazy2,
          ZSTD_compressBlock_btopt,
          ZSTD_compressBlock_btultra,
          ZSTD_compressBlock_btultra2 },
        { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
          ZSTD_compressBlock_fast_extDict,
          ZSTD_compressBlock_doubleFast_extDict,
          ZSTD_compressBlock_greedy_extDict,
          ZSTD_compressBlock_lazy_extDict,
          ZSTD_compressBlock_lazy2_extDict,
          ZSTD_compressBlock_btlazy2_extDict,
          ZSTD_compressBlock_btopt_extDict,
          ZSTD_compressBlock_btultra_extDict,
          ZSTD_compressBlock_btultra_extDict },
        { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
          ZSTD_compressBlock_fast_dictMatchState,
          ZSTD_compressBlock_doubleFast_dictMatchState,
          ZSTD_compressBlock_greedy_dictMatchState,
          ZSTD_compressBlock_lazy_dictMatchState,
          ZSTD_compressBlock_lazy2_dictMatchState,
          ZSTD_compressBlock_btlazy2_dictMatchState,
          ZSTD_compressBlock_btopt_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState,
          ZSTD_compressBlock_btultra_dictMatchState }
    };
    ZSTD_blockCompressor selectedCompressor;
    ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);

    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
    selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
    assert(selectedCompressor != NULL);
    return selectedCompressor;
}

/* append the trailing (un-matched) literals to the seqStore literal buffer */
static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
                                   const BYTE* anchor, size_t lastLLSize)
{
    memcpy(seqStorePtr->lit, anchor, lastLLSize);
    seqStorePtr->lit += lastLLSize;
}

/* rewind the seqStore to an empty state, ready for a new block */
void ZSTD_resetSeqStore(seqStore_t* ssPtr)
{
    ssPtr->lit = ssPtr->litStart;
    ssPtr->sequences = ssPtr->sequencesStart;
    ssPtr->longLengthID = 0;
}

typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;

/* Run the selected match finder (extern seqs / LDM / block compressor) over
 * `src` and fill zc->seqStore; returns ZSTDbss_noCompress for tiny inputs. */
static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
{
    ZSTD_matchState_t* const ms = &zc->blockState.matchState;
    DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
    assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
    /* Assert that we have correctly flushed the ctx params into the ms's copy */
    ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
    if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) {
        ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
        return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
    }
    ZSTD_resetSeqStore(&(zc->seqStore));
    /* required for optimal parser to read stats from dictionary */
    ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
    /* tell the optimal parser how we expect to compress literals */
    ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
    /* a gap between an attached dict and the current window is not safe,
     * they must remain adjacent,
     * and when that stops being the case, the dict must be unset */
    assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);

    /* limited update after a very long match */
    {   const BYTE* const base = ms->window.base;
        const BYTE* const istart = (const BYTE*)src;
        const U32 current = (U32)(istart-base);
        if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1));   /* ensure no overflow */
        if (current > ms->nextToUpdate + 384)
            ms->nextToUpdate = current - MIN(192, (U32)(current - ms->nextToUpdate - 384));
    }

    /* select and store sequences */
    {   ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
        size_t lastLLSize;
        {   int i;
            for (i = 0; i < ZSTD_REP_NUM; ++i)
                zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
        }
        if (zc->externSeqStore.pos < zc->externSeqStore.size) {
            assert(!zc->appliedParams.ldmParams.enableLdm);
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&zc->externSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
        } else if (zc->appliedParams.ldmParams.enableLdm) {
            rawSeqStore_t ldmSeqStore = {NULL, 0, 0, 0};

            ldmSeqStore.seq = zc->ldmSequences;
            ldmSeqStore.capacity = zc->maxNbLdmSequences;
            /* Updates ldmSeqStore.size */
            FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
                                               &zc->appliedParams.ldmParams,
                                               src, srcSize), "");
            /* Updates ldmSeqStore.pos */
            lastLLSize =
                ZSTD_ldm_blockCompress(&ldmSeqStore,
                                       ms, &zc->seqStore,
                                       zc->blockState.nextCBlock->rep,
                                       src, srcSize);
            assert(ldmSeqStore.pos == ldmSeqStore.size);
        } else {   /* not long range mode */
            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy, dictMode);
            lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
        }
        {   const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
            ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
    }   }
    return ZSTDbss_compress;
}

/* Translate the internal seqStore of `zc` into public ZSTD_Sequence records,
 * resolving repcodes into absolute offsets, appended at seqCollector.seqIndex. */
static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
{
    const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
    const seqDef* seqs = seqStore->sequencesStart;
    size_t seqsSize = seqStore->sequences - seqs;

    ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
    size_t i; size_t position; int repIdx;

    assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
    for (i = 0, position = 0; i < seqsSize; ++i) {
        outSeqs[i].offset = seqs[i].offset;
        outSeqs[i].litLength = seqs[i].litLength;
        outSeqs[i].matchLength = seqs[i].matchLength + MINMATCH;

        if (i == seqStore->longLengthPos) {
            if (seqStore->longLengthID == 1) {
                outSeqs[i].litLength += 0x10000;
            } else if (seqStore->longLengthID == 2) {
                outSeqs[i].matchLength += 0x10000;
            }
        }

        if (outSeqs[i].offset <= ZSTD_REP_NUM) {
            /* offset is a repcode : resolve it against a previous sequence */
            outSeqs[i].rep = outSeqs[i].offset;
            repIdx = (unsigned int)i - outSeqs[i].offset;

            if (outSeqs[i].litLength == 0) {
                if (outSeqs[i].offset < 3) {
                    --repIdx;
                } else {
                    repIdx = (unsigned int)i - 1;
                }
                ++outSeqs[i].rep;
            }
            assert(repIdx >= -3);
            outSeqs[i].offset = repIdx >= 0 ? outSeqs[repIdx].offset : repStartValue[-repIdx - 1];
            if (outSeqs[i].rep == 4) {
                --outSeqs[i].offset;
            }
        } else {
            outSeqs[i].offset -= ZSTD_REP_NUM;
        }

        position += outSeqs[i].litLength;
        outSeqs[i].matchPos = (unsigned int)position;
        position += outSeqs[i].matchLength;
    }
    zc->seqCollector.seqIndex += seqsSize;
}

/* Compress `src` once into a throwaway buffer purely to collect the sequences
 * produced, writing up to `outSeqsSize` of them into `outSeqs`.
 * @return the number of sequences collected. */
size_t ZSTD_getSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
    size_t outSeqsSize, const void* src, size_t srcSize)
{
    const size_t dstCapacity = ZSTD_compressBound(srcSize);
    void* dst = ZSTD_malloc(dstCapacity, ZSTD_defaultCMem);
    SeqCollector seqCollector;

    RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");

    seqCollector.collectSequences = 1;
    seqCollector.seqStart = outSeqs;
    seqCollector.seqIndex = 0;
    seqCollector.maxSequences = outSeqsSize;
    zc->seqCollector = seqCollector;

    ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
    ZSTD_free(dst, ZSTD_defaultCMem);
    return zc->seqCollector.seqIndex;
}

/* Returns true if the given block is a
RLE block */
+static int ZSTD_isRLE(const BYTE *ip, size_t length) {
+    size_t i;
+    /* a 0- or 1-byte block is trivially a run of a single byte */
+    if (length < 2) return 1;
+    for (i = 1; i < length; ++i) {
+        if (ip[0] != ip[i]) return 0;
+    }
+    return 1;
+}
+
+/* Returns true if the given block may be RLE.
+ * This is just a heuristic based on the compressibility.
+ * It may return both false positives and false negatives.
+ */
+static int ZSTD_maybeRLE(seqStore_t const* seqStore)
+{
+    size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
+    size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
+
+    /* very few sequences and literals => candidate for RLE; caller still verifies with ZSTD_isRLE() */
+    return nbSeqs < 4 && nbLits < 10;
+}
+
+/* Swap prev/next compressed-block state, so that the repcodes and entropy
+ * tables of the block just emitted become the reference for the next block. */
+static void ZSTD_confirmRepcodesAndEntropyTables(ZSTD_CCtx* zc)
+{
+    ZSTD_compressedBlockState_t* const tmp = zc->blockState.prevCBlock;
+    zc->blockState.prevCBlock = zc->blockState.nextCBlock;
+    zc->blockState.nextCBlock = tmp;
+}
+
+static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+                                        void* dst, size_t dstCapacity,
+                                        const void* src, size_t srcSize, U32 frame)
+{
+    /* This the upper bound for the length of an rle block.
+     * This isn't the actual upper bound. Finding the real threshold
+     * needs further investigation.
+ */ + const U32 rleMaxLength = 25; + size_t cSize; + const BYTE* ip = (const BYTE*)src; + BYTE* op = (BYTE*)dst; + DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", + (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, + (unsigned)zc->blockState.matchState.nextToUpdate); + + { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); + FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); + if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; } + } + + if (zc->seqCollector.collectSequences) { + ZSTD_copyBlockSequences(zc); + return 0; + } + + /* encode sequences and literals */ + cSize = ZSTD_compressSequences(&zc->seqStore, + &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + dst, dstCapacity, + srcSize, + zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */, + zc->bmi2); + + if (frame && + /* We don't want to emit our first block as a RLE even if it qualifies because + * doing so will cause the decoder (cli only) to throw a "should consume all input error." + * This is only an issue for zstd <= v1.4.3 + */ + !zc->isFirstBlock && + cSize < rleMaxLength && + ZSTD_isRLE(ip, srcSize)) + { + cSize = 1; + op[0] = ip[0]; + } + +out: + if (!ZSTD_isError(cSize) && cSize > 1) { + ZSTD_confirmRepcodesAndEntropyTables(zc); + } + /* We check that dictionaries have offset codes available for the first + * block. After the first block, the offcode table might not have large + * enough codes to represent the offsets in the data. 
+ */ + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + return cSize; +} + +static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const size_t bss, U32 lastBlock) +{ + DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()"); + if (bss == ZSTDbss_compress) { + if (/* We don't want to emit our first block as a RLE even if it qualifies because + * doing so will cause the decoder (cli only) to throw a "should consume all input error." + * This is only an issue for zstd <= v1.4.3 + */ + !zc->isFirstBlock && + ZSTD_maybeRLE(&zc->seqStore) && + ZSTD_isRLE((BYTE const*)src, srcSize)) + { + return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock); + } + /* Attempt superblock compression. + * + * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the + * standard ZSTD_compressBound(). This is a problem, because even if we have + * space now, taking an extra byte now could cause us to run out of space later + * and violate ZSTD_compressBound(). + * + * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize. + * + * In order to respect ZSTD_compressBound() we must attempt to emit a raw + * uncompressed block in these cases: + * * cSize == 0: Return code for an uncompressed block. + * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize). + * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of + * output space. + * * cSize >= blockBound(srcSize): We have expanded the block too much so + * emit an uncompressed block. 
+ */ + { + size_t const cSize = ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock); + if (cSize != ERROR(dstSize_tooSmall)) { + size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed"); + if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) { + ZSTD_confirmRepcodesAndEntropyTables(zc); + return cSize; + } + } + } + } + + DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()"); + /* Superblock compression failed, attempt to emit a single no compress block. + * The decoder will be able to stream this block since it is uncompressed. + */ + return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock); +} + +static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastBlock) +{ + size_t cSize = 0; + const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); + DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)", + (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize); + FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); + + cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed"); + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + + return cSize; +} + +static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + void const* ip, + void const* iend) +{ + if (ZSTD_window_needOverflowCorrection(ms->window, iend)) { + U32 const maxDist = (U32)1 << params->cParams.windowLog; + U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, 
params->cParams.strategy); + U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip); + ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30); + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30); + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31); + ZSTD_cwksp_mark_tables_dirty(ws); + ZSTD_reduceIndex(ms, params, correction); + ZSTD_cwksp_mark_tables_clean(ws); + if (ms->nextToUpdate < correction) ms->nextToUpdate = 0; + else ms->nextToUpdate -= correction; + /* invalidate dictionaries on overflow correction */ + ms->loadedDictEnd = 0; + ms->dictMatchState = NULL; + } +} + +/*! ZSTD_compress_frameChunk() : +* Compress a chunk of data into one or multiple blocks. +* All blocks will be terminated, all input will be consumed. +* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. +* Frame is supposed already started (header already produced) +* @return : compressed size, or an error code +*/ +static size_t ZSTD_compress_frameChunk (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastFrameChunk) +{ + size_t blockSize = cctx->blockSize; + size_t remaining = srcSize; + const BYTE* ip = (const BYTE*)src; + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog; + + assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX); + + DEBUGLOG(5, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize); + if (cctx->appliedParams.fParams.checksumFlag && srcSize) + XXH64_update(&cctx->xxhState, src, srcSize); + + while (remaining) { + ZSTD_matchState_t* const ms = &cctx->blockState.matchState; + U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE, + dstSize_tooSmall, + "not enough space to store compressed block"); + if (remaining < blockSize) blockSize = remaining; + + ZSTD_overflowCorrectIfNeeded( + ms, 
&cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); + ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + + /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ + if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; + + { size_t cSize; + if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) { + cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed"); + assert(cSize > 0); + assert(cSize <= blockSize + ZSTD_blockHeaderSize); + } else { + cSize = ZSTD_compressBlock_internal(cctx, + op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, + ip, blockSize, 1 /* frame */); + FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed"); + + if (cSize == 0) { /* block is not compressible */ + cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock); + FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); + } else { + U32 const cBlockHeader = cSize == 1 ? + lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) : + lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(op, cBlockHeader); + cSize += ZSTD_blockHeaderSize; + } + } + + + ip += blockSize; + assert(remaining >= blockSize); + remaining -= blockSize; + op += cSize; + assert(dstCapacity >= cSize); + dstCapacity -= cSize; + cctx->isFirstBlock = 0; + DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u", + (unsigned)cSize); + } } + + if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; + return (size_t)(op-ostart); +} + + +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, + const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID) +{ BYTE* const op = (BYTE*)dst; + U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ + U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 
0 : dictIDSizeCodeLength; /* 0-3 */ + U32 const checksumFlag = params->fParams.checksumFlag>0; + U32 const windowSize = (U32)1 << params->cParams.windowLog; + U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); + BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); + U32 const fcsCode = params->fParams.contentSizeFlag ? + (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */ + BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); + size_t pos=0; + + assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)); + RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall, + "dst buf is too small to fit worst-case frame header size."); + DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u", + !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode); + + if (params->format == ZSTD_f_zstd1) { + MEM_writeLE32(dst, ZSTD_MAGICNUMBER); + pos = 4; + } + op[pos++] = frameHeaderDescriptionByte; + if (!singleSegment) op[pos++] = windowLogByte; + switch(dictIDSizeCode) + { + default: assert(0); /* impossible */ + case 0 : break; + case 1 : op[pos] = (BYTE)(dictID); pos++; break; + case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; + case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; + } + switch(fcsCode) + { + default: assert(0); /* impossible */ + case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; + case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; + case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; + case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; + } + return pos; +} + +/* ZSTD_writeLastEmptyBlock() : + * output an empty Block with end-of-frame mark to complete a frame + * @return : size 
of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) + * or an error code if `dstCapacity` is too small (stage != ZSTDcs_init, stage_wrong, + "wrong cctx stage"); + RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm, + parameter_unsupported, + "incompatible with ldm"); + cctx->externSeqStore.seq = seq; + cctx->externSeqStore.size = nbSeq; + cctx->externSeqStore.capacity = nbSeq; + cctx->externSeqStore.pos = 0; + return 0; +} + + +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 frame, U32 lastFrameChunk) +{ + ZSTD_matchState_t* const ms = &cctx->blockState.matchState; + size_t fhSize = 0; + + DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u", + cctx->stage, (unsigned)srcSize); + RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong, + "missing init (ZSTD_compressBegin)"); + + if (frame && (cctx->stage==ZSTDcs_init)) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, + cctx->pledgedSrcSizePlusOne-1, cctx->dictID); + FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); + assert(fhSize <= dstCapacity); + dstCapacity -= fhSize; + dst = (char*)dst + fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (!srcSize) return fhSize; /* do not generate an empty block if no input */ + + if (!ZSTD_window_update(&ms->window, src, srcSize)) { + ms->nextToUpdate = ms->window.dictLimit; + } + if (cctx->appliedParams.ldmParams.enableLdm) { + ZSTD_window_update(&cctx->ldmState.window, src, srcSize); + } + + if (!frame) { + /* overflow check and correction for block mode */ + ZSTD_overflowCorrectIfNeeded( + ms, &cctx->workspace, &cctx->appliedParams, + src, (BYTE const*)src + srcSize); + } + + DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize); + { size_t const cSize = frame ? 
+ ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : + ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */); + FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed"); + cctx->consumedSrcSize += srcSize; + cctx->producedCSize += (cSize + fhSize); + assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); + if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); + RETURN_ERROR_IF( + cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne, + srcSize_wrong, + "error : pledgedSrcSize = %u, while realSrcSize >= %u", + (unsigned)cctx->pledgedSrcSizePlusOne-1, + (unsigned)cctx->consumedSrcSize); + } + return cSize + fhSize; + } +} + +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize); + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */); +} + + +size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx) +{ + ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams; + assert(!ZSTD_checkCParams(cParams)); + return MIN (ZSTD_BLOCKSIZE_MAX, (U32)1 << cParams.windowLog); +} + +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize); + { size_t const blockSizeMax = ZSTD_getBlockSize(cctx); + RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); } + + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */); +} + +/*! 
ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ +static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + ZSTD_CCtx_params const* params, + const void* src, size_t srcSize, + ZSTD_dictTableLoadMethod_e dtlm) +{ + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + + ZSTD_window_update(&ms->window, src, srcSize); + ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base); + + if (params->ldmParams.enableLdm && ls != NULL) { + ZSTD_window_update(&ls->window, src, srcSize); + ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base); + } + + /* Assert that we the ms params match the params we're being given */ + ZSTD_assertEqualCParams(params->cParams, ms->cParams); + + if (srcSize <= HASH_READ_SIZE) return 0; + + while (iend - ip > HASH_READ_SIZE) { + size_t const remaining = (size_t)(iend - ip); + size_t const chunk = MIN(remaining, ZSTD_CHUNKSIZE_MAX); + const BYTE* const ichunk = ip + chunk; + + ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, ichunk); + + if (params->ldmParams.enableLdm && ls != NULL) + ZSTD_ldm_fillHashTable(ls, (const BYTE*)src, (const BYTE*)src + srcSize, ¶ms->ldmParams); + + switch(params->cParams.strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable(ms, ichunk, dtlm); + break; + case ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, ichunk, dtlm); + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + if (chunk >= HASH_READ_SIZE) + ZSTD_insertAndFindFirstIndex(ms, ichunk-HASH_READ_SIZE); + break; + + case ZSTD_btlazy2: /* we want the dictionary table fully sorted */ + case ZSTD_btopt: + case ZSTD_btultra: + case ZSTD_btultra2: + if (chunk >= HASH_READ_SIZE) + ZSTD_updateTree(ms, ichunk-HASH_READ_SIZE, ichunk); + break; + + default: + assert(0); /* not possible : not a valid strategy id */ + } + + ip = ichunk; + } + + ms->nextToUpdate = (U32)(iend - ms->window.base); + return 0; +} + + +/* Dictionaries that 
assign zero probability to symbols that show up causes problems + when FSE encoding. Refuse dictionaries that assign zero probability to symbols + that we may encounter during compression. + NOTE: This behavior is not standard and could be improved in the future. */ +static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { + U32 s; + RETURN_ERROR_IF(dictMaxSymbolValue < maxSymbolValue, dictionary_corrupted, "dict fse tables don't have all symbols"); + for (s = 0; s <= maxSymbolValue; ++s) { + RETURN_ERROR_IF(normalizedCounter[s] == 0, dictionary_corrupted, "dict fse tables don't have all symbols"); + } + return 0; +} + +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + short* offcodeNCount, unsigned* offcodeMaxValue, + const void* const dict, size_t dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */ + const BYTE* const dictEnd = dictPtr + dictSize; + dictPtr += 8; + bs->entropy.huf.repeatMode = HUF_repeat_check; + + { unsigned maxSymbolValue = 255; + unsigned hasZeroWeights = 1; + size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr, + dictEnd-dictPtr, &hasZeroWeights); + + /* We only set the loaded table as valid if it contains all non-zero + * weights. 
Otherwise, we set it to check */ + if (!hasZeroWeights) + bs->entropy.huf.repeatMode = HUF_repeat_valid; + + RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, ""); + dictPtr += hufHeaderSize; + } + + { unsigned offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); + /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ + /* fill all offset symbols to avoid garbage at end of table */ + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( + bs->entropy.fse.offcodeCTable, + offcodeNCount, MaxOff, offcodeLog, + workspace, HUF_WORKSPACE_SIZE)), + dictionary_corrupted, ""); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); + /* Every match length code must have non-zero probability */ + FORWARD_IF_ERROR( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML), ""); + RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp( + bs->entropy.fse.matchlengthCTable, + matchlengthNCount, matchlengthMaxValue, matchlengthLog, + workspace, HUF_WORKSPACE_SIZE)), + dictionary_corrupted, ""); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + 
RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
+        RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
+        /* Every literal length code must have non-zero probability */
+        FORWARD_IF_ERROR( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL), "");
+        RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
+                bs->entropy.fse.litlengthCTable,
+                litlengthNCount, litlengthMaxValue, litlengthLog,
+                workspace, HUF_WORKSPACE_SIZE)),
+            dictionary_corrupted, "");
+        dictPtr += litlengthHeaderSize;
+    }
+
+    RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
+    bs->rep[0] = MEM_readLE32(dictPtr+0);
+    bs->rep[1] = MEM_readLE32(dictPtr+4);
+    bs->rep[2] = MEM_readLE32(dictPtr+8);
+    dictPtr += 12;
+
+    return dictPtr - (const BYTE*)dict;
+}
+
+/* Dictionary format :
+ * See :
+ * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+ */
+/*! ZSTD_loadZstdDictionary() :
+ * @return : dictID, or an error code
+ *  assumptions : magic number supposed already checked
+ *                dictSize supposed >= 8
+ */
+static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
+                                      ZSTD_matchState_t* ms,
+                                      ZSTD_cwksp* ws,
+                                      ZSTD_CCtx_params const* params,
+                                      const void* dict, size_t dictSize,
+                                      ZSTD_dictTableLoadMethod_e dtlm,
+                                      void* workspace)
+{
+    const BYTE* dictPtr = (const BYTE*)dict;
+    const BYTE* const dictEnd = dictPtr + dictSize;
+    short offcodeNCount[MaxOff+1];
+    unsigned offcodeMaxValue = MaxOff;
+    size_t dictID;
+    size_t eSize;
+
+    /* entropy workspace must be large enough for any single FSE table build below */
+    ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
+    assert(dictSize >= 8);
+    assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
+
+    dictID = params->fParams.noDictIDFlag ?
0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ ); + eSize = ZSTD_loadCEntropy(bs, workspace, offcodeNCount, &offcodeMaxValue, dict, dictSize); + FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed"); + dictPtr += eSize; + + { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + U32 offcodeMax = MaxOff; + if (dictContentSize <= ((U32)-1) - 128 KB) { + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ + offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ + } + /* All offset values <= dictContentSize + 128 KB must be representable */ + FORWARD_IF_ERROR(ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)), ""); + /* All repCodes must be <= dictContentSize and != 0*/ + { U32 u; + for (u=0; u<3; u++) { + RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, ""); + RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, ""); + } } + + bs->entropy.fse.offcode_repeatMode = FSE_repeat_valid; + bs->entropy.fse.matchlength_repeatMode = FSE_repeat_valid; + bs->entropy.fse.litlength_repeatMode = FSE_repeat_valid; + FORWARD_IF_ERROR(ZSTD_loadDictionaryContent( + ms, NULL, ws, params, dictPtr, dictContentSize, dtlm), ""); + return dictID; + } +} + +/** ZSTD_compress_insertDictionary() : +* @return : dictID, or an error code */ +static size_t +ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs, + ZSTD_matchState_t* ms, + ldmState_t* ls, + ZSTD_cwksp* ws, + const ZSTD_CCtx_params* params, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + void* workspace) +{ + DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize); + if ((dict==NULL) || (dictSize<8)) { + RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); + return 0; + } + + ZSTD_reset_compressedBlockState(bs); + + /* dict restricted modes */ + if 
(dictContentType == ZSTD_dct_rawContent) + return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm); + + if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) { + if (dictContentType == ZSTD_dct_auto) { + DEBUGLOG(4, "raw content dictionary detected"); + return ZSTD_loadDictionaryContent( + ms, ls, ws, params, dict, dictSize, dtlm); + } + RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, ""); + assert(0); /* impossible */ + } + + /* dict as full zstd dictionary */ + return ZSTD_loadZstdDictionary( + bs, ms, ws, params, dict, dictSize, dtlm, workspace); +} + +#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB) +#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6) + +/*! ZSTD_compressBegin_internal() : + * @return : 0, or an error code */ +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, U64 pledgedSrcSize, + ZSTD_buffered_policy_e zbuff) +{ + DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog); + /* params are supposed to be fully validated at this point */ + assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); + assert(!((dict) && (cdict))); /* either dict or cdict, not both */ + if ( (cdict) + && (cdict->dictContentSize > 0) + && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF + || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER + || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN + || cdict->compressionLevel == 0) + && (params->attachDictPref != ZSTD_dictForceLoad) ) { + return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff); + } + + FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, *params, pledgedSrcSize, + ZSTDcrp_makeClean, zbuff) , ""); + { size_t const dictID = cdict ? 
+ ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, &cctx->blockState.matchState, + &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent, + cdict->dictContentSize, dictContentType, dtlm, + cctx->entropyWorkspace) + : ZSTD_compress_insertDictionary( + cctx->blockState.prevCBlock, &cctx->blockState.matchState, + &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize, + dictContentType, dtlm, cctx->entropyWorkspace); + FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); + assert(dictID <= UINT_MAX); + cctx->dictID = (U32)dictID; + } + return 0; +} + +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, + unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog); + /* compression parameters verification and optimization */ + FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , ""); + return ZSTD_compressBegin_internal(cctx, + dict, dictSize, dictContentType, dtlm, + cdict, + params, pledgedSrcSize, + ZSTDb_not_buffered); +} + +/*! 
ZSTD_compressBegin_advanced() : +* @return : 0, or an error code */ +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, ¶ms); + return ZSTD_compressBegin_advanced_internal(cctx, + dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, + NULL /*cdict*/, + &cctxParams, pledgedSrcSize); +} + +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, ¶ms); + DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, + &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered); +} + +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); +} + + +/*! ZSTD_writeEpilogue() : +* Ends a frame. 
+* @return : nb of bytes written into dst (or an error code) */ +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + size_t fhSize = 0; + + DEBUGLOG(4, "ZSTD_writeEpilogue"); + RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing"); + + /* special case : empty frame */ + if (cctx->stage == ZSTDcs_init) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0); + FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed"); + dstCapacity -= fhSize; + op += fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (cctx->stage != ZSTDcs_ending) { + /* write one last empty block, make it the "last" block */ + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; + RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue"); + MEM_writeLE32(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + } + + if (cctx->appliedParams.fParams.checksumFlag) { + U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); + RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum"); + DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum); + MEM_writeLE32(op, checksum); + op += 4; + } + + cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ + return op-ostart; +} + +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t endResult; + size_t const cSize = ZSTD_compressContinue_internal(cctx, + dst, dstCapacity, src, srcSize, + 1 /* frame mode */, 1 /* last chunk */); + FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed"); + endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); + FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed"); + assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0)); + if 
(cctx->pledgedSrcSizePlusOne != 0) { /* control src size */ + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1); + DEBUGLOG(4, "end of frame : controlling src size"); + RETURN_ERROR_IF( + cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1, + srcSize_wrong, + "error : pledgedSrcSize = %u, while realSrcSize = %u", + (unsigned)cctx->pledgedSrcSizePlusOne-1, + (unsigned)cctx->consumedSrcSize); + } + return cSize + endResult; +} + + +static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + const ZSTD_parameters* params) +{ + ZSTD_CCtx_params const cctxParams = + ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, params); + DEBUGLOG(4, "ZSTD_compress_internal"); + return ZSTD_compress_advanced_internal(cctx, + dst, dstCapacity, + src, srcSize, + dict, dictSize, + &cctxParams); +} + +size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params) +{ + DEBUGLOG(4, "ZSTD_compress_advanced"); + FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), ""); + return ZSTD_compress_internal(cctx, + dst, dstCapacity, + src, srcSize, + dict, dictSize, + ¶ms); +} + +/* Internal */ +size_t ZSTD_compress_advanced_internal( + ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + const ZSTD_CCtx_params* params) +{ + DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize); + FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, + dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL, + params, srcSize, ZSTDb_not_buffered) , ""); + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + +size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + int 
compressionLevel) +{ + ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0); + ZSTD_CCtx_params cctxParams = ZSTD_assignParamsToCCtxParams(&cctx->requestedParams, ¶ms); + DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize); + assert(params.fParams.contentSizeFlag == 1); + return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctxParams); +} + +size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel) +{ + DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize); + assert(cctx != NULL); + return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel); +} + +size_t ZSTD_compress(void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel) +{ + size_t result; + ZSTD_CCtx ctxBody; + ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem); + result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel); + ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */ + return result; +} + + +/* ===== Dictionary API ===== */ + +/*! ZSTD_estimateCDictSize_advanced() : + * Estimate amount of memory that will be needed to create a dictionary with following arguments */ +size_t ZSTD_estimateCDictSize_advanced( + size_t dictSize, ZSTD_compressionParameters cParams, + ZSTD_dictLoadMethod_e dictLoadMethod) +{ + DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict)); + return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) + + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); +} + +size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel) +{ + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); + return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy); +} + +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support sizeof on NULL */ + DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict)); + /* cdict may be in the workspace */ + return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict)) + + ZSTD_cwksp_sizeof(&cdict->workspace); +} + +static size_t ZSTD_initCDict_internal( + ZSTD_CDict* cdict, + const void* dictBuffer, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams) +{ + DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType); + assert(!ZSTD_checkCParams(cParams)); + cdict->matchState.cParams = cParams; + if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) { + cdict->dictContent = dictBuffer; + } else { + void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*))); + RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!"); + cdict->dictContent = internalBuffer; + memcpy(internalBuffer, dictBuffer, dictSize); + } + cdict->dictContentSize = dictSize; + + cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE); + + + /* Reset the state to no dictionary */ + ZSTD_reset_compressedBlockState(&cdict->cBlockState); + FORWARD_IF_ERROR(ZSTD_reset_matchState( + &cdict->matchState, + &cdict->workspace, + &cParams, + ZSTDcrp_makeClean, + ZSTDirp_reset, + ZSTD_resetTarget_CDict), ""); + /* (Maybe) load the dictionary + * Skips loading the dictionary if it is < 8 bytes. 
+ */ + { ZSTD_CCtx_params params; + memset(¶ms, 0, sizeof(params)); + params.compressionLevel = ZSTD_CLEVEL_DEFAULT; + params.fParams.contentSizeFlag = 1; + params.cParams = cParams; + { size_t const dictID = ZSTD_compress_insertDictionary( + &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace, + ¶ms, cdict->dictContent, cdict->dictContentSize, + dictContentType, ZSTD_dtlm_full, cdict->entropyWorkspace); + FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed"); + assert(dictID <= (size_t)(U32)-1); + cdict->dictID = (U32)dictID; + } + } + + return 0; +} + +ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams, ZSTD_customMem customMem) +{ + DEBUGLOG(3, "ZSTD_createCDict_advanced, mode %u", (unsigned)dictContentType); + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + { size_t const workspaceSize = + ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + + ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0) + + (dictLoadMethod == ZSTD_dlm_byRef ? 
0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))); + void* const workspace = ZSTD_malloc(workspaceSize, customMem); + ZSTD_cwksp ws; + ZSTD_CDict* cdict; + + if (!workspace) { + ZSTD_free(workspace, customMem); + return NULL; + } + + ZSTD_cwksp_init(&ws, workspace, workspaceSize); + + cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); + assert(cdict != NULL); + ZSTD_cwksp_move(&cdict->workspace, &ws); + cdict->customMem = customMem; + cdict->compressionLevel = 0; /* signals advanced API usage */ + + if (ZSTD_isError( ZSTD_initCDict_internal(cdict, + dictBuffer, dictSize, + dictLoadMethod, dictContentType, + cParams) )) { + ZSTD_freeCDict(cdict); + return NULL; + } + + return cdict; + } +} + +ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); + ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dict, dictSize, + ZSTD_dlm_byCopy, ZSTD_dct_auto, + cParams, ZSTD_defaultCMem); + if (cdict) + cdict->compressionLevel = compressionLevel == 0 ? ZSTD_CLEVEL_DEFAULT : compressionLevel; + return cdict; +} + +ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize); + return ZSTD_createCDict_advanced(dict, dictSize, + ZSTD_dlm_byRef, ZSTD_dct_auto, + cParams, ZSTD_defaultCMem); +} + +size_t ZSTD_freeCDict(ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = cdict->customMem; + int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict); + ZSTD_cwksp_free(&cdict->workspace, cMem); + if (!cdictInWorkspace) { + ZSTD_free(cdict, cMem); + } + return 0; + } +} + +/*! ZSTD_initStaticCDict_advanced() : + * Generate a digested dictionary in provided memory area. 
+ * workspace: The memory area to emplace the dictionary into. + * Provided pointer must 8-bytes aligned. + * It must outlive dictionary usage. + * workspaceSize: Use ZSTD_estimateCDictSize() + * to determine how large workspace must be. + * cParams : use ZSTD_getCParams() to transform a compression level + * into its relevants cParams. + * @return : pointer to ZSTD_CDict*, or NULL if error (size too small) + * Note : there is no corresponding "free" function. + * Since workspace was allocated externally, it must be freed externally. + */ +const ZSTD_CDict* ZSTD_initStaticCDict( + void* workspace, size_t workspaceSize, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_compressionParameters cParams) +{ + size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, /* forCCtx */ 0); + size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) + + (dictLoadMethod == ZSTD_dlm_byRef ? 0 + : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*)))) + + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) + + matchStateSize; + ZSTD_CDict* cdict; + + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + + { + ZSTD_cwksp ws; + ZSTD_cwksp_init(&ws, workspace, workspaceSize); + cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict)); + if (cdict == NULL) return NULL; + ZSTD_cwksp_move(&cdict->workspace, &ws); + } + + DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u", + (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize)); + if (workspaceSize < neededSize) return NULL; + + if (ZSTD_isError( ZSTD_initCDict_internal(cdict, + dict, dictSize, + dictLoadMethod, dictContentType, + cParams) )) + return NULL; + + return cdict; +} + +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict) +{ + assert(cdict != NULL); + return cdict->matchState.cParams; +} + +/* ZSTD_compressBegin_usingCDict_advanced() : + * cdict must 
be != NULL */ +size_t ZSTD_compressBegin_usingCDict_advanced( + ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, + ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_advanced"); + RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!"); + { ZSTD_CCtx_params params = cctx->requestedParams; + params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF + || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER + || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN + || cdict->compressionLevel == 0 ) + && (params.attachDictPref != ZSTD_dictForceLoad) ? + ZSTD_getCParamsFromCDict(cdict) + : ZSTD_getCParams(cdict->compressionLevel, + pledgedSrcSize, + cdict->dictContentSize); + /* Increase window log to fit the entire dictionary and source if the + * source size is known. Limit the increase to 19, which is the + * window log for compression level 1 with the largest source size. + */ + if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) { + U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19); + U32 const limitedSrcLog = limitedSrcSize > 1 ? 
ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1; + params.cParams.windowLog = MAX(params.cParams.windowLog, limitedSrcLog); + } + params.fParams = fParams; + return ZSTD_compressBegin_internal(cctx, + NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, + cdict, + ¶ms, pledgedSrcSize, + ZSTDb_not_buffered); + } +} + +/* ZSTD_compressBegin_usingCDict() : + * pledgedSrcSize=0 means "unknown" + * if pledgedSrcSize>0, it will enable contentSizeFlag */ +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict) +{ + ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + DEBUGLOG(4, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u", !fParams.noDictIDFlag); + return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN); +} + +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, ZSTD_frameParameters fParams) +{ + FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */ + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + +/*! ZSTD_compress_usingCDict() : + * Compression using a digested Dictionary. + * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. 
+ * Note that compression parameters are decided at CDict creation time + * while frame parameters are hardcoded */ +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict) +{ + ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ }; + return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams); +} + + + +/* ****************************************************************** +* Streaming +********************************************************************/ + +ZSTD_CStream* ZSTD_createCStream(void) +{ + DEBUGLOG(3, "ZSTD_createCStream"); + return ZSTD_createCStream_advanced(ZSTD_defaultCMem); +} + +ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize) +{ + return ZSTD_initStaticCCtx(workspace, workspaceSize); +} + +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) +{ /* CStream and CCtx are now same object */ + return ZSTD_createCCtx_advanced(customMem); +} + +size_t ZSTD_freeCStream(ZSTD_CStream* zcs) +{ + return ZSTD_freeCCtx(zcs); /* same object */ +} + + + +/*====== Initialization ======*/ + +size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; } + +size_t ZSTD_CStreamOutSize(void) +{ + return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; +} + +static size_t ZSTD_resetCStream_internal(ZSTD_CStream* cctx, + const void* const dict, size_t const dictSize, ZSTD_dictContentType_e const dictContentType, + const ZSTD_CDict* const cdict, + ZSTD_CCtx_params params, unsigned long long const pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_resetCStream_internal"); + /* Finalize the compression parameters */ + params.cParams = ZSTD_getCParamsFromCCtxParams(¶ms, pledgedSrcSize, dictSize); + /* params are supposed to be fully validated at this point */ + assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams))); + assert(!((dict) && (cdict))); /* 
either dict or cdict, not both */ + + FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx, + dict, dictSize, dictContentType, ZSTD_dtlm_fast, + cdict, + ¶ms, pledgedSrcSize, + ZSTDb_buffered) , ""); + + cctx->inToCompress = 0; + cctx->inBuffPos = 0; + cctx->inBuffTarget = cctx->blockSize + + (cctx->blockSize == pledgedSrcSize); /* for small input: avoid automatic flush on reaching end of block, since it would require to add a 3-bytes null block to end frame */ + cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0; + cctx->streamStage = zcss_load; + cctx->frameEnded = 0; + return 0; /* ready to go */ +} + +/* ZSTD_resetCStream(): + * pledgedSrcSize == 0 means "unknown" */ +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss) +{ + /* temporary : 0 interpreted as "unknown" during transition period. + * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. + * 0 will be interpreted as "empty" in the future. + */ + U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; + DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + return 0; +} + +/*! ZSTD_initCStream_internal() : + * Note : for lib/compress only. Used by zstdmt_compress.c. 
+ * Assumption 1 : params are valid + * Assumption 2 : either dict, or cdict, is defined, not both */ +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, + unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_initCStream_internal"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); + zcs->requestedParams = *params; + assert(!((dict) && (cdict))); /* either dict or cdict, not both */ + if (dict) { + FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); + } else { + /* Dictionary is cleared if !cdict */ + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); + } + return 0; +} + +/* ZSTD_initCStream_usingCDict_advanced() : + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */ +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, + const ZSTD_CDict* cdict, + ZSTD_frameParameters fParams, + unsigned long long pledgedSrcSize) +{ + DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + zcs->requestedParams.fParams = fParams; + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); + return 0; +} + +/* note : cdict must outlive compression session */ +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict) +{ + DEBUGLOG(4, "ZSTD_initCStream_usingCDict"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , ""); + return 0; +} + + +/* ZSTD_initCStream_advanced() : + * pledgedSrcSize must be exact. + * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN. 
+ * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */ +size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pss) +{ + /* for compatibility with older programs relying on this behavior. + * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN. + * This line will be removed in the future. + */ + U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss; + DEBUGLOG(4, "ZSTD_initCStream_advanced"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , ""); + zcs->requestedParams = ZSTD_assignParamsToCCtxParams(&zcs->requestedParams, ¶ms); + FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); + return 0; +} + +size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel) +{ + DEBUGLOG(4, "ZSTD_initCStream_usingDict"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , ""); + return 0; +} + +size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss) +{ + /* temporary : 0 interpreted as "unknown" during transition period. + * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN. + * 0 will be interpreted as "empty" in the future. + */ + U64 const pledgedSrcSize = (pss==0) ? 
ZSTD_CONTENTSIZE_UNKNOWN : pss; + DEBUGLOG(4, "ZSTD_initCStream_srcSize"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , ""); + return 0; +} + +size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel) +{ + DEBUGLOG(4, "ZSTD_initCStream"); + FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , ""); + FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , ""); + return 0; +} + +/*====== Compression ======*/ + +static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx) +{ + size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos; + if (hintInSize==0) hintInSize = cctx->blockSize; + return hintInSize; +} + +/** ZSTD_compressStream_generic(): + * internal function for all *compressStream*() variants + * non-static, because can be called from zstdmt_compress.c + * @return : hint size for next input */ +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective const flushMode) +{ + const char* const istart = (const char*)input->src; + const char* const iend = input->size != 0 ? istart + input->size : istart; + const char* ip = input->pos != 0 ? istart + input->pos : istart; + char* const ostart = (char*)output->dst; + char* const oend = output->size != 0 ? ostart + output->size : ostart; + char* op = output->pos != 0 ? 
ostart + output->pos : ostart; + U32 someMoreWork = 1; + + /* check expectations */ + DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%u", (unsigned)flushMode); + assert(zcs->inBuff != NULL); + assert(zcs->inBuffSize > 0); + assert(zcs->outBuff != NULL); + assert(zcs->outBuffSize > 0); + assert(output->pos <= output->size); + assert(input->pos <= input->size); + + while (someMoreWork) { + switch(zcs->streamStage) + { + case zcss_init: + RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!"); + + case zcss_load: + if ( (flushMode == ZSTD_e_end) + && ((size_t)(oend-op) >= ZSTD_compressBound(iend-ip)) /* enough dstCapacity */ + && (zcs->inBuffPos == 0) ) { + /* shortcut to compression pass directly into output buffer */ + size_t const cSize = ZSTD_compressEnd(zcs, + op, oend-op, ip, iend-ip); + DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize); + FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed"); + ip = iend; + op += cSize; + zcs->frameEnded = 1; + ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + someMoreWork = 0; break; + } + /* complete loading into inBuffer */ + { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + size_t const loaded = ZSTD_limitCopy( + zcs->inBuff + zcs->inBuffPos, toLoad, + ip, iend-ip); + zcs->inBuffPos += loaded; + if (loaded != 0) + ip += loaded; + if ( (flushMode == ZSTD_e_continue) + && (zcs->inBuffPos < zcs->inBuffTarget) ) { + /* not enough input to fill full block : stop here */ + someMoreWork = 0; break; + } + if ( (flushMode == ZSTD_e_flush) + && (zcs->inBuffPos == zcs->inToCompress) ) { + /* empty */ + someMoreWork = 0; break; + } + } + /* compress current block (note : this stage cannot be stopped in the middle) */ + DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode); + { void* cDst; + size_t cSize; + size_t const iSize = zcs->inBuffPos - zcs->inToCompress; + size_t oSize = oend-op; + unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend); + if (oSize >= ZSTD_compressBound(iSize)) 
+ cDst = op; /* compress into output buffer, to skip flush stage */ + else + cDst = zcs->outBuff, oSize = zcs->outBuffSize; + cSize = lastBlock ? + ZSTD_compressEnd(zcs, cDst, oSize, + zcs->inBuff + zcs->inToCompress, iSize) : + ZSTD_compressContinue(zcs, cDst, oSize, + zcs->inBuff + zcs->inToCompress, iSize); + FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed"); + zcs->frameEnded = lastBlock; + /* prepare next block */ + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; + if (zcs->inBuffTarget > zcs->inBuffSize) + zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; + DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u", + (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize); + if (!lastBlock) + assert(zcs->inBuffTarget <= zcs->inBuffSize); + zcs->inToCompress = zcs->inBuffPos; + if (cDst == op) { /* no need to flush */ + op += cSize; + if (zcs->frameEnded) { + DEBUGLOG(5, "Frame completed directly in outBuffer"); + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + } + break; + } + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->streamStage = zcss_flush; /* pass-through to flush stage */ + } + /* fall-through */ + case zcss_flush: + DEBUGLOG(5, "flush stage"); + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op), + zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u", + (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed); + if (flushed) + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush!=flushed) { + /* flush not fully completed, presumably because dst is too small */ + assert(op==oend); + someMoreWork = 0; + break; + } + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + if (zcs->frameEnded) { + DEBUGLOG(5, "Frame completed on flush"); + someMoreWork = 0; + ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); + 
break; + } + zcs->streamStage = zcss_load; + break; + } + + default: /* impossible */ + assert(0); + } + } + + input->pos = ip - istart; + output->pos = op - ostart; + if (zcs->frameEnded) return 0; + return ZSTD_nextInputSizeHint(zcs); +} + +static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx) +{ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers >= 1) { + assert(cctx->mtctx != NULL); + return ZSTDMT_nextInputSizeHint(cctx->mtctx); + } +#endif + return ZSTD_nextInputSizeHint(cctx); + +} + +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , ""); + return ZSTD_nextInputSizeHint_MTorST(zcs); +} + + +size_t ZSTD_compressStream2( ZSTD_CCtx* cctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective endOp) +{ + DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp); + /* check conditions */ + RETURN_ERROR_IF(output->pos > output->size, GENERIC, "invalid buffer"); + RETURN_ERROR_IF(input->pos > input->size, GENERIC, "invalid buffer"); + assert(cctx!=NULL); + + /* transparent initialization stage */ + if (cctx->streamStage == zcss_init) { + ZSTD_CCtx_params params = cctx->requestedParams; + ZSTD_prefixDict const prefixDict = cctx->prefixDict; + FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. 
*/ + memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */ + assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */ + DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage"); + if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = input->size + 1; /* auto-fix pledgedSrcSize */ + params.cParams = ZSTD_getCParamsFromCCtxParams( + &cctx->requestedParams, cctx->pledgedSrcSizePlusOne-1, 0 /*dictSize*/); + + +#ifdef ZSTD_MULTITHREAD + if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) { + params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */ + } + if (params.nbWorkers > 0) { + /* mt context creation */ + if (cctx->mtctx == NULL) { + DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u", + params.nbWorkers); + cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem); + RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!"); + } + /* mt compression */ + DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers); + FORWARD_IF_ERROR( ZSTDMT_initCStream_internal( + cctx->mtctx, + prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, + cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , ""); + cctx->streamStage = zcss_load; + cctx->appliedParams.nbWorkers = params.nbWorkers; + } else +#endif + { FORWARD_IF_ERROR( ZSTD_resetCStream_internal(cctx, + prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, + cctx->cdict, + params, cctx->pledgedSrcSizePlusOne-1) , ""); + assert(cctx->streamStage == zcss_load); + assert(cctx->appliedParams.nbWorkers == 0); + } } + /* end of transparent initialization stage */ + + /* compression stage */ +#ifdef ZSTD_MULTITHREAD + if (cctx->appliedParams.nbWorkers > 0) { + int const forceMaxProgress = (endOp == ZSTD_e_flush || endOp == ZSTD_e_end); + size_t flushMin; + assert(forceMaxProgress || endOp == ZSTD_e_continue /* Protection for a new flush type 
*/); + if (cctx->cParamsChanged) { + ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams); + cctx->cParamsChanged = 0; + } + do { + flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp); + if ( ZSTD_isError(flushMin) + || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */ + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); + } + FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed"); + } while (forceMaxProgress && flushMin != 0 && output->pos < output->size); + DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic"); + /* Either we don't require maximum forward progress, we've finished the + * flush, or we are out of output space. + */ + assert(!forceMaxProgress || flushMin == 0 || output->pos == output->size); + return flushMin; + } +#endif + FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , ""); + DEBUGLOG(5, "completed ZSTD_compressStream2"); + return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */ +} + +size_t ZSTD_compressStream2_simpleArgs ( + ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos, + ZSTD_EndDirective endOp) +{ + ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; + ZSTD_inBuffer input = { src, srcSize, *srcPos }; + /* ZSTD_compressStream2() will check validity of dstPos and srcPos */ + size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; +} + +size_t ZSTD_compress2(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize); + ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only); + { size_t oPos = 0; + size_t iPos = 0; + size_t const result = ZSTD_compressStream2_simpleArgs(cctx, + dst, dstCapacity, &oPos, + src, srcSize, &iPos, + ZSTD_e_end); + 
FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed"); + if (result != 0) { /* compression not completed, due to lack of output space */ + assert(oPos == dstCapacity); + RETURN_ERROR(dstSize_tooSmall, ""); + } + assert(iPos == srcSize); /* all input is expected consumed */ + return oPos; + } +} + +/*====== Finalize ======*/ + +/*! ZSTD_flushStream() : + * @return : amount of data remaining to flush */ +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + ZSTD_inBuffer input = { NULL, 0, 0 }; + return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush); +} + + +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + ZSTD_inBuffer input = { NULL, 0, 0 }; + size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end); + FORWARD_IF_ERROR( remainingToFlush , "ZSTD_compressStream2 failed"); + if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */ + /* single thread mode : attempt to calculate remaining to flush more precisely */ + { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE; + size_t const checksumSize = (size_t)(zcs->frameEnded ? 
0 : zcs->appliedParams.fParams.checksumFlag * 4); + size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize; + DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush); + return toFlush; + } +} + + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_MAX_CLEVEL 22 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } +int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" - for any srcSize > 256 KB */ + /* W, C, H, S, L, TL, strat */ + { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ + { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ + { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ + { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ + { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ + { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */ + { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */ + { 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */ + { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ + { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ + { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ + { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */ + { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ + { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ + { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ + { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ + { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ + { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ + { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ + { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ + { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for 
negative levels */ + { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ + { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ + { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ + { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ + { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ + { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ + { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ + { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ + { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ + { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 128 KB */ + /* W, C, H, S, L, T, strat */ + { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ + { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ + { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ + { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ + { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ + { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ + { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* 
level 12 */ + { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ + { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ + { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ + { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ + { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ + { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ + { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ + { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ + { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +}; + +/*! 
ZSTD_getCParams_internal() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown. + * Use dictSize == 0 for unknown or unused. */ +static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{ + int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN; + size_t const addedSize = unknown && dictSize > 0 ? 500 : 0; + U64 const rSize = unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize; + U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); + int row = compressionLevel; + DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel); + if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */ + if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */ + if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL; + { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row]; + if (compressionLevel < 0) cp.targetLength = (unsigned)(-compressionLevel); /* acceleration factor */ + /* refine parameters based on srcSize & dictSize */ + return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize); + } +} + +/*! ZSTD_getCParams() : + * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize. + * Size values are optional, provide 0 if not known or unused */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) +{ + if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize); +} + +/*! ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). 
+ * Fields of `ZSTD_frameParameters` are set to default values */ +static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { + ZSTD_parameters params; + ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize); + DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel); + memset(¶ms, 0, sizeof(params)); + params.cParams = cParams; + params.fParams.contentSizeFlag = 1; + return params; +} + +/*! ZSTD_getParams() : + * same idea as ZSTD_getCParams() + * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`). + * Fields of `ZSTD_frameParameters` are set to default values */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) { + if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_internal.h b/src/borg/algorithms/zstd/lib/compress/zstd_compress_internal.h new file mode 100644 index 0000000000..db73f6ce21 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_internal.h @@ -0,0 +1,1125 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* This header contains definitions + * that shall **only** be used by modules within lib/compress. 
+ */ + +#ifndef ZSTD_COMPRESS_H +#define ZSTD_COMPRESS_H + +/*-************************************* +* Dependencies +***************************************/ +#include "../common/zstd_internal.h" +#include "zstd_cwksp.h" +#ifdef ZSTD_MULTITHREAD +# include "zstdmt_compress.h" +#endif + +#if defined (__cplusplus) +extern "C" { +#endif + + +/*-************************************* +* Constants +***************************************/ +#define kSearchStrength 8 +#define HASH_READ_SIZE 8 +#define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index ZSTD_DUBT_UNSORTED_MARK==1 means "unsorted". + It could be confused for a real successor at index "1", if sorted as larger than its predecessor. + It's not a big deal though : candidate will just be sorted again. + Additionally, candidate position 1 will be lost. + But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss. + The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy. 
+ This constant is required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */ + + +/*-************************************* +* Context memory management +***************************************/ +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; +typedef enum { zcss_init=0, zcss_load, zcss_flush } ZSTD_cStreamStage; + +typedef struct ZSTD_prefixDict_s { + const void* dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; +} ZSTD_prefixDict; + +typedef struct { + void* dictBuffer; + void const* dict; + size_t dictSize; + ZSTD_dictContentType_e dictContentType; + ZSTD_CDict* cdict; +} ZSTD_localDict; + +typedef struct { + U32 CTable[HUF_CTABLE_SIZE_U32(255)]; + HUF_repeat repeatMode; +} ZSTD_hufCTables_t; + +typedef struct { + FSE_CTable offcodeCTable[FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)]; + FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)]; + FSE_CTable litlengthCTable[FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)]; + FSE_repeat offcode_repeatMode; + FSE_repeat matchlength_repeatMode; + FSE_repeat litlength_repeatMode; +} ZSTD_fseCTables_t; + +typedef struct { + ZSTD_hufCTables_t huf; + ZSTD_fseCTables_t fse; +} ZSTD_entropyCTables_t; + +typedef struct { + U32 off; + U32 len; +} ZSTD_match_t; + +typedef struct { + int price; + U32 off; + U32 mlen; + U32 litlen; + U32 rep[ZSTD_REP_NUM]; +} ZSTD_optimal_t; + +typedef enum { zop_dynamic=0, zop_predef } ZSTD_OptPrice_e; + +typedef struct { + /* All tables are allocated inside cctx->workspace by ZSTD_resetCCtx_internal() */ + unsigned* litFreq; /* table of literals statistics, of size 256 */ + unsigned* litLengthFreq; /* table of litLength statistics, of size (MaxLL+1) */ + unsigned* matchLengthFreq; /* table of matchLength statistics, of size (MaxML+1) */ + unsigned* offCodeFreq; /* table of offCode statistics, of size (MaxOff+1) */ + ZSTD_match_t* matchTable; /* list of found matches, of size ZSTD_OPT_NUM+1 */ + ZSTD_optimal_t* 
priceTable; /* All positions tracked by optimal parser, of size ZSTD_OPT_NUM+1 */ + + U32 litSum; /* nb of literals */ + U32 litLengthSum; /* nb of litLength codes */ + U32 matchLengthSum; /* nb of matchLength codes */ + U32 offCodeSum; /* nb of offset codes */ + U32 litSumBasePrice; /* to compare to log2(litfreq) */ + U32 litLengthSumBasePrice; /* to compare to log2(llfreq) */ + U32 matchLengthSumBasePrice;/* to compare to log2(mlfreq) */ + U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ + ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ + const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ + ZSTD_literalCompressionMode_e literalCompressionMode; +} optState_t; + +typedef struct { + ZSTD_entropyCTables_t entropy; + U32 rep[ZSTD_REP_NUM]; +} ZSTD_compressedBlockState_t; + +typedef struct { + BYTE const* nextSrc; /* next block here to continue on current prefix */ + BYTE const* base; /* All regular indexes relative to this position */ + BYTE const* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more valid data */ +} ZSTD_window_t; + +typedef struct ZSTD_matchState_t ZSTD_matchState_t; +struct ZSTD_matchState_t { + ZSTD_window_t window; /* State for window round buffer management */ + U32 loadedDictEnd; /* index of end of dictionary, within context's referential. + * When loadedDictEnd != 0, a dictionary is in use, and still valid. + * This relies on a mechanism to set loadedDictEnd=0 when dictionary is no longer within distance. + * Such mechanism is provided within ZSTD_window_enforceMaxDist() and ZSTD_checkDictValidity(). + * When dict referential is copied into active context (i.e. not attached), + * loadedDictEnd == dictSize, since referential starts from zero. 
+ */ + U32 nextToUpdate; /* index from which to continue table update */ + U32 hashLog3; /* dispatch table for matches of len==3 : larger == faster, more memory */ + U32* hashTable; + U32* hashTable3; + U32* chainTable; + optState_t opt; /* optimal parser state */ + const ZSTD_matchState_t* dictMatchState; + ZSTD_compressionParameters cParams; +}; + +typedef struct { + ZSTD_compressedBlockState_t* prevCBlock; + ZSTD_compressedBlockState_t* nextCBlock; + ZSTD_matchState_t matchState; +} ZSTD_blockState_t; + +typedef struct { + U32 offset; + U32 checksum; +} ldmEntry_t; + +typedef struct { + ZSTD_window_t window; /* State for the window round buffer management */ + ldmEntry_t* hashTable; + U32 loadedDictEnd; + BYTE* bucketOffsets; /* Next position in bucket to insert entry */ + U64 hashPower; /* Used to compute the rolling hash. + * Depends on ldmParams.minMatchLength */ +} ldmState_t; + +typedef struct { + U32 enableLdm; /* 1 if enable long distance matching */ + U32 hashLog; /* Log size of hashTable */ + U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ + U32 minMatchLength; /* Minimum match length */ + U32 hashRateLog; /* Log number of entries to skip */ + U32 windowLog; /* Window log for the LDM */ +} ldmParams_t; + +typedef struct { + U32 offset; + U32 litLength; + U32 matchLength; +} rawSeq; + +typedef struct { + rawSeq* seq; /* The start of the sequences */ + size_t pos; /* The position where reading stopped. <= size. */ + size_t size; /* The number of sequences. <= capacity. */ + size_t capacity; /* The capacity starting from `seq` pointer */ +} rawSeqStore_t; + +typedef struct { + int collectSequences; + ZSTD_Sequence* seqStart; + size_t seqIndex; + size_t maxSequences; +} SeqCollector; + +struct ZSTD_CCtx_params_s { + ZSTD_format_e format; + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; + + int compressionLevel; + int forceWindow; /* force back-references to respect limit of + * 1< 63) ? 
ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; +} + +/* ZSTD_MLcode() : + * note : mlBase = matchLength - MINMATCH; + * because it's the format it's stored in seqStore->sequences */ +MEM_STATIC U32 ZSTD_MLcode(U32 mlBase) +{ + static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; + static const U32 ML_deltaCode = 36; + return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; +} + +typedef struct repcodes_s { + U32 rep[3]; +} repcodes_t; + +MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0) +{ + repcodes_t newReps; + if (offset >= ZSTD_REP_NUM) { /* full offset */ + newReps.rep[2] = rep[1]; + newReps.rep[1] = rep[0]; + newReps.rep[0] = offset - ZSTD_REP_MOVE; + } else { /* repcode */ + U32 const repCode = offset + ll0; + if (repCode > 0) { /* note : if repCode==0, no change */ + U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; + newReps.rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; + newReps.rep[1] = rep[0]; + newReps.rep[0] = currentOffset; + } else { /* repCode == 0 */ + memcpy(&newReps, rep, sizeof(newReps)); + } + } + return newReps; +} + +/* ZSTD_cParam_withinBounds: + * @return 1 if value is within cParam bounds, + * 0 otherwise */ +MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value) +{ + ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); + if (ZSTD_isError(bounds.error)) return 0; + if (value < bounds.lowerBound) return 0; + if (value > bounds.upperBound) return 0; + return 1; +} + +/* ZSTD_noCompressBlock() : + * Writes uncompressed block to dst buffer from given src. + * Returns the size of the block */ +MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock) +{ + U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3); + RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity, + dstSize_tooSmall, "dst buf too small for uncompressed block"); + MEM_writeLE24(dst, cBlockHeader24); + memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); + return ZSTD_blockHeaderSize + srcSize; +} + +MEM_STATIC size_t ZSTD_rleCompressBlock (void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock) +{ + BYTE* const op = (BYTE*)dst; + U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3); + RETURN_ERROR_IF(dstCapacity < 4, dstSize_tooSmall, ""); + MEM_writeLE24(op, cBlockHeader); + op[3] = src; + return 4; +} + + +/* ZSTD_minGain() : + * minimum compression required + * to generate a compress block or a compressed literals section. + * note : use same formula for both situations */ +MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) +{ + U32 const minlog = (strat>=ZSTD_btultra) ? 
(U32)(strat) - 1 : 6; + ZSTD_STATIC_ASSERT(ZSTD_btultra == 8); + assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat)); + return (srcSize >> minlog) + 2; +} + +MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams) +{ + switch (cctxParams->literalCompressionMode) { + case ZSTD_lcm_huffman: + return 0; + case ZSTD_lcm_uncompressed: + return 1; + default: + assert(0 /* impossible: pre-validated */); + /* fall-through */ + case ZSTD_lcm_auto: + return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); + } +} + +/*! ZSTD_safecopyLiterals() : + * memcpy() function that won't read beyond more than WILDCOPY_OVERLENGTH bytes past ilimit_w. + * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single + * large copies. + */ +static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) { + assert(iend > ilimit_w); + if (ip <= ilimit_w) { + ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); + op += ilimit_w - ip; + ip = ilimit_w; + } + while (ip < iend) *op++ = *ip++; +} + +/*! ZSTD_storeSeq() : + * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t. + * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes). + * `mlBase` : matchLength - MINMATCH + * Allowed to overread literals up to litLimit. 
+*/ +HINT_INLINE UNUSED_ATTR +void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase) +{ + BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; + BYTE const* const litEnd = literals + litLength; +#if defined(DEBUGLEVEL) && (DEBUGLEVEL >= 6) + static const BYTE* g_start = NULL; + if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ + { U32 const pos = (U32)((const BYTE*)literals - g_start); + DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", + pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode); + } +#endif + assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); + /* copy Literals */ + assert(seqStorePtr->maxNbLit <= 128 KB); + assert(seqStorePtr->lit + litLength <= seqStorePtr->litStart + seqStorePtr->maxNbLit); + assert(literals + litLength <= litLimit); + if (litEnd <= litLimit_w) { + /* Common case we can use wildcopy. + * First copy 16 bytes, because literals are likely short. 
+ */ + assert(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(seqStorePtr->lit, literals); + if (litLength > 16) { + ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap); + } + } else { + ZSTD_safecopyLiterals(seqStorePtr->lit, literals, litEnd, litLimit_w); + } + seqStorePtr->lit += litLength; + + /* literal Length */ + if (litLength>0xFFFF) { + assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ + seqStorePtr->longLengthID = 1; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offset = offCode + 1; + + /* match Length */ + if (mlBase>0xFFFF) { + assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */ + seqStorePtr->longLengthID = 2; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].matchLength = (U16)mlBase; + + seqStorePtr->sequences++; +} + + +/*-************************************* +* Match length counter +***************************************/ +static unsigned ZSTD_NbCommonBytes (size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0; +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, + 0, 3, 1, 3, 1, 4, 2, 7, + 0, 2, 3, 6, 1, 5, 3, 5, + 1, 3, 4, 4, 2, 5, 6, 7, + 7, 0, 1, 2, 3, 3, 4, 6, + 2, 6, 5, 5, 3, 4, 5, 6, + 7, 1, 2, 4, 6, 4, 4, 5, + 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r=0; + return _BitScanForward( &r, (U32)val ) ? 
(unsigned)(r >> 3) : 0; +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, + 3, 2, 2, 1, 3, 2, 0, 1, + 3, 3, 1, 2, 2, 2, 2, 0, + 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + return _BitScanReverse64( &r, val ) ? (unsigned)(r >> 3) : 0; +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + return _BitScanReverse( &r, (unsigned long)val ) ? 
(unsigned)(r >> 3) : 0; +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} + + +MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) +{ + const BYTE* const pStart = pIn; + const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); + + if (pIn < pInLoopLimit) { + { size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (diff) return ZSTD_NbCommonBytes(diff); } + pIn+=sizeof(size_t); pMatch+=sizeof(size_t); + while (pIn < pInLoopLimit) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } + pIn += ZSTD_NbCommonBytes(diff); + return (size_t)(pIn - pStart); + } } + if (MEM_64bits() && (pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ + +static const U32 prime4bytes = 2654435761U; +static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } + +static const U64 prime5bytes = 889523592379ULL; +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } + +static const U64 prime6bytes = 227718039650203ULL; +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } + +static const U64 prime7bytes = 58295818150454627ULL; +static 
size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } + +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } + +MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +{ + switch(mls) + { + default: + case 4: return ZSTD_hash4Ptr(p, hBits); + case 5: return ZSTD_hash5Ptr(p, hBits); + case 6: return ZSTD_hash6Ptr(p, hBits); + case 7: return ZSTD_hash7Ptr(p, hBits); + case 8: return ZSTD_hash8Ptr(p, hBits); + } +} + +/** ZSTD_ipow() : + * Return base^exponent. + */ +static U64 ZSTD_ipow(U64 base, U64 exponent) +{ + U64 power = 1; + while (exponent) { + if (exponent & 1) power *= base; + exponent >>= 1; + base *= base; + } + return power; +} + +#define ZSTD_ROLL_HASH_CHAR_OFFSET 10 + +/** ZSTD_rollingHash_append() : + * Add the buffer to the hash value. + */ +static U64 ZSTD_rollingHash_append(U64 hash, void const* buf, size_t size) +{ + BYTE const* istart = (BYTE const*)buf; + size_t pos; + for (pos = 0; pos < size; ++pos) { + hash *= prime8bytes; + hash += istart[pos] + ZSTD_ROLL_HASH_CHAR_OFFSET; + } + return hash; +} + +/** ZSTD_rollingHash_compute() : + * Compute the rolling hash value of the buffer. + */ +MEM_STATIC U64 ZSTD_rollingHash_compute(void const* buf, size_t size) +{ + return ZSTD_rollingHash_append(0, buf, size); +} + +/** ZSTD_rollingHash_primePower() : + * Compute the primePower to be passed to ZSTD_rollingHash_rotate() for a hash + * over a window of length bytes. + */ +MEM_STATIC U64 ZSTD_rollingHash_primePower(U32 length) +{ + return ZSTD_ipow(prime8bytes, length - 1); +} + +/** ZSTD_rollingHash_rotate() : + * Rotate the rolling hash by one byte. 
+ */ +MEM_STATIC U64 ZSTD_rollingHash_rotate(U64 hash, BYTE toRemove, BYTE toAdd, U64 primePower) +{ + hash -= (toRemove + ZSTD_ROLL_HASH_CHAR_OFFSET) * primePower; + hash *= prime8bytes; + hash += toAdd + ZSTD_ROLL_HASH_CHAR_OFFSET; + return hash; +} + +/*-************************************* +* Round buffer management +***************************************/ +#if (ZSTD_WINDOWLOG_MAX_64 > 31) +# error "ZSTD_WINDOWLOG_MAX is too large : would overflow ZSTD_CURRENT_MAX" +#endif +/* Max current allowed */ +#define ZSTD_CURRENT_MAX ((3U << 29) + (1U << ZSTD_WINDOWLOG_MAX)) +/* Maximum chunk size before overflow correction needs to be called again */ +#define ZSTD_CHUNKSIZE_MAX \ + ( ((U32)-1) /* Maximum ending current index */ \ + - ZSTD_CURRENT_MAX) /* Maximum beginning lowLimit */ + +/** + * ZSTD_window_clear(): + * Clears the window containing the history by simply setting it to empty. + */ +MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) +{ + size_t const endT = (size_t)(window->nextSrc - window->base); + U32 const end = (U32)endT; + + window->lowLimit = end; + window->dictLimit = end; +} + +/** + * ZSTD_window_hasExtDict(): + * Returns non-zero if the window has a non-empty extDict. + */ +MEM_STATIC U32 ZSTD_window_hasExtDict(ZSTD_window_t const window) +{ + return window.lowLimit < window.dictLimit; +} + +/** + * ZSTD_matchState_dictMode(): + * Inspects the provided matchState and figures out what dictMode should be + * passed to the compressor. + */ +MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms) +{ + return ZSTD_window_hasExtDict(ms->window) ? + ZSTD_extDict : + ms->dictMatchState != NULL ? + ZSTD_dictMatchState : + ZSTD_noDict; +} + +/** + * ZSTD_window_needOverflowCorrection(): + * Returns non-zero if the indices are getting too large and need overflow + * protection. 
+ */ +MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window, + void const* srcEnd) +{ + U32 const current = (U32)((BYTE const*)srcEnd - window.base); + return current > ZSTD_CURRENT_MAX; +} + +/** + * ZSTD_window_correctOverflow(): + * Reduces the indices to protect from index overflow. + * Returns the correction made to the indices, which must be applied to every + * stored index. + * + * The least significant cycleLog bits of the indices must remain the same, + * which may be 0. Every index up to maxDist in the past must be valid. + * NOTE: (maxDist & cycleMask) must be zero. + */ +MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, + U32 maxDist, void const* src) +{ + /* preemptive overflow correction: + * 1. correction is large enough: + * lowLimit > (3<<29) ==> current > 3<<29 + 1< (3<<29 + 1< (3<<29) - (1< (3<<29) - (1<<30) (NOTE: chainLog <= 30) + * > 1<<29 + * + * 2. (ip+ZSTD_CHUNKSIZE_MAX - cctx->base) doesn't overflow: + * After correction, current is less than (1<base < 1<<32. + * 3. (cctx->lowLimit + 1< 3<<29 + 1<base); + U32 const currentCycle0 = current & cycleMask; + /* Exclude zero so that newCurrent - maxDist >= 1. */ + U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0; + U32 const newCurrent = currentCycle1 + maxDist; + U32 const correction = current - newCurrent; + assert((maxDist & cycleMask) == 0); + assert(current > newCurrent); + /* Loose bound, should be around 1<<29 (see above) */ + assert(correction > 1<<28); + + window->base += correction; + window->dictBase += correction; + if (window->lowLimit <= correction) window->lowLimit = 1; + else window->lowLimit -= correction; + if (window->dictLimit <= correction) window->dictLimit = 1; + else window->dictLimit -= correction; + + /* Ensure we can still reference the full window. */ + assert(newCurrent >= maxDist); + assert(newCurrent - maxDist >= 1); + /* Ensure that lowLimit and dictLimit didn't underflow. 
*/ + assert(window->lowLimit <= newCurrent); + assert(window->dictLimit <= newCurrent); + + DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction, + window->lowLimit); + return correction; +} + +/** + * ZSTD_window_enforceMaxDist(): + * Updates lowLimit so that: + * (srcEnd - base) - lowLimit == maxDist + loadedDictEnd + * + * It ensures index is valid as long as index >= lowLimit. + * This must be called before a block compression call. + * + * loadedDictEnd is only defined if a dictionary is in use for current compression. + * As the name implies, loadedDictEnd represents the index at end of dictionary. + * The value lies within context's referential, it can be directly compared to blockEndIdx. + * + * If loadedDictEndPtr is NULL, no dictionary is in use, and we use loadedDictEnd == 0. + * If loadedDictEndPtr is not NULL, we set it to zero after updating lowLimit. + * This is because dictionaries are allowed to be referenced fully + * as long as the last byte of the dictionary is in the window. + * Once input has progressed beyond window size, dictionary cannot be referenced anymore. + * + * In normal dict mode, the dictionary lies between lowLimit and dictLimit. + * In dictMatchState mode, lowLimit and dictLimit are the same, + * and the dictionary is below them. + * forceWindow and dictMatchState are therefore incompatible. + */ +MEM_STATIC void +ZSTD_window_enforceMaxDist(ZSTD_window_t* window, + const void* blockEnd, + U32 maxDist, + U32* loadedDictEndPtr, + const ZSTD_matchState_t** dictMatchStatePtr) +{ + U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); + U32 const loadedDictEnd = (loadedDictEndPtr != NULL) ? *loadedDictEndPtr : 0; + DEBUGLOG(5, "ZSTD_window_enforceMaxDist: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", + (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); + + /* - When there is no dictionary : loadedDictEnd == 0. 
+ In which case, the test (blockEndIdx > maxDist) is merely to avoid + overflowing next operation `newLowLimit = blockEndIdx - maxDist`. + - When there is a standard dictionary : + Index referential is copied from the dictionary, + which means it starts from 0. + In which case, loadedDictEnd == dictSize, + and it makes sense to compare `blockEndIdx > maxDist + dictSize` + since `blockEndIdx` also starts from zero. + - When there is an attached dictionary : + loadedDictEnd is expressed within the referential of the context, + so it can be directly compared against blockEndIdx. + */ + if (blockEndIdx > maxDist + loadedDictEnd) { + U32 const newLowLimit = blockEndIdx - maxDist; + if (window->lowLimit < newLowLimit) window->lowLimit = newLowLimit; + if (window->dictLimit < window->lowLimit) { + DEBUGLOG(5, "Update dictLimit to match lowLimit, from %u to %u", + (unsigned)window->dictLimit, (unsigned)window->lowLimit); + window->dictLimit = window->lowLimit; + } + /* On reaching window size, dictionaries are invalidated */ + if (loadedDictEndPtr) *loadedDictEndPtr = 0; + if (dictMatchStatePtr) *dictMatchStatePtr = NULL; + } +} + +/* Similar to ZSTD_window_enforceMaxDist(), + * but only invalidates dictionary + * when input progresses beyond window size. 
+ * assumption : loadedDictEndPtr and dictMatchStatePtr are valid (non NULL) + * loadedDictEnd uses same referential as window->base + * maxDist is the window size */ +MEM_STATIC void +ZSTD_checkDictValidity(const ZSTD_window_t* window, + const void* blockEnd, + U32 maxDist, + U32* loadedDictEndPtr, + const ZSTD_matchState_t** dictMatchStatePtr) +{ + assert(loadedDictEndPtr != NULL); + assert(dictMatchStatePtr != NULL); + { U32 const blockEndIdx = (U32)((BYTE const*)blockEnd - window->base); + U32 const loadedDictEnd = *loadedDictEndPtr; + DEBUGLOG(5, "ZSTD_checkDictValidity: blockEndIdx=%u, maxDist=%u, loadedDictEnd=%u", + (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd); + assert(blockEndIdx >= loadedDictEnd); + + if (blockEndIdx > loadedDictEnd + maxDist) { + /* On reaching window size, dictionaries are invalidated. + * For simplification, if window size is reached anywhere within next block, + * the dictionary is invalidated for the full block. + */ + DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)"); + *loadedDictEndPtr = 0; + *dictMatchStatePtr = NULL; + } else { + if (*loadedDictEndPtr != 0) { + DEBUGLOG(6, "dictionary considered valid for current block"); + } } } +} + +MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { + memset(window, 0, sizeof(*window)); + window->base = (BYTE const*)""; + window->dictBase = (BYTE const*)""; + window->dictLimit = 1; /* start from 1, so that 1st position is valid */ + window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */ + window->nextSrc = window->base + 1; /* see issue #1241 */ +} + +/** + * ZSTD_window_update(): + * Updates the window by appending [src, src + srcSize) to the window. + * If it is not contiguous, the current prefix becomes the extDict, and we + * forget about the extDict. Handles overlap of the prefix and extDict. + * Returns non-zero if the segment is contiguous. 
+ */ +MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, + void const* src, size_t srcSize) +{ + BYTE const* const ip = (BYTE const*)src; + U32 contiguous = 1; + DEBUGLOG(5, "ZSTD_window_update"); + if (srcSize == 0) + return contiguous; + assert(window->base != NULL); + assert(window->dictBase != NULL); + /* Check if blocks follow each other */ + if (src != window->nextSrc) { + /* not contiguous */ + size_t const distanceFromBase = (size_t)(window->nextSrc - window->base); + DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit); + window->lowLimit = window->dictLimit; + assert(distanceFromBase == (size_t)(U32)distanceFromBase); /* should never overflow */ + window->dictLimit = (U32)distanceFromBase; + window->dictBase = window->base; + window->base = ip - distanceFromBase; + /* ms->nextToUpdate = window->dictLimit; */ + if (window->dictLimit - window->lowLimit < HASH_READ_SIZE) window->lowLimit = window->dictLimit; /* too small extDict */ + contiguous = 0; + } + window->nextSrc = ip + srcSize; + /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ + if ( (ip+srcSize > window->dictBase + window->lowLimit) + & (ip < window->dictBase + window->dictLimit)) { + ptrdiff_t const highInputIdx = (ip + srcSize) - window->dictBase; + U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)window->dictLimit) ? window->dictLimit : (U32)highInputIdx; + window->lowLimit = lowLimitMax; + DEBUGLOG(5, "Overlapping extDict and input : new lowLimit = %u", window->lowLimit); + } + return contiguous; +} + +/** + * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix. + */ +MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog) +{ + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.lowLimit; + U32 const withinWindow = (current - lowestValid > maxDistance) ? 
current - maxDistance : lowestValid; + U32 const isDictionary = (ms->loadedDictEnd != 0); + U32 const matchLowest = isDictionary ? lowestValid : withinWindow; + return matchLowest; +} + +/** + * Returns the lowest allowed match index in the prefix. + */ +MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog) +{ + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.dictLimit; + U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid; + U32 const isDictionary = (ms->loadedDictEnd != 0); + U32 const matchLowest = isDictionary ? lowestValid : withinWindow; + return matchLowest; +} + + + +/* debug functions */ +#if (DEBUGLEVEL>=2) + +MEM_STATIC double ZSTD_fWeight(U32 rawStat) +{ + U32 const fp_accuracy = 8; + U32 const fp_multiplier = (1 << fp_accuracy); + U32 const newStat = rawStat + 1; + U32 const hb = ZSTD_highbit32(newStat); + U32 const BWeight = hb * fp_multiplier; + U32 const FWeight = (newStat << fp_accuracy) >> hb; + U32 const weight = BWeight + FWeight; + assert(hb + fp_accuracy < 31); + return (double)weight / fp_multiplier; +} + +/* display a table content, + * listing each element, its frequency, and its predicted bit cost */ +MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max) +{ + unsigned u, sum; + for (u=0, sum=0; u<=max; u++) sum += table[u]; + DEBUGLOG(2, "total nb elts: %u", sum); + for (u=0; u<=max; u++) { + DEBUGLOG(2, "%2u: %5u (%.2f)", + u, table[u], ZSTD_fWeight(sum) - ZSTD_fWeight(table[u]) ); + } +} + +#endif + + +#if defined (__cplusplus) +} +#endif + +/* =============================================================== + * Shared internal declarations + * These prototypes may be called from sources not in lib/compress + * =============================================================== */ + +/* ZSTD_loadCEntropy() : + * dict : must point at beginning of a valid zstd dictionary. 
+ * return : size of dictionary header (size of magic number + dict ID + entropy tables) + * assumptions : magic number supposed already checked + * and dictSize >= 8 */ +size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace, + short* offcodeNCount, unsigned* offcodeMaxValue, + const void* const dict, size_t dictSize); + +void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs); + +/* ============================================================== + * Private declarations + * These prototypes shall only be called from within lib/compress + * ============================================================== */ + +/* ZSTD_getCParamsFromCCtxParams() : + * cParams are built depending on compressionLevel, src size hints, + * LDM and manually set compression parameters. + * Note: srcSizeHint == 0 means 0! + */ +ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( + const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize); + +/*! ZSTD_initCStream_internal() : + * Private use only. Init streaming operation. + * expects params to be valid. + * must receive dict, or cdict, or none, but not both. + * @return : 0, or an error code */ +size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, unsigned long long pledgedSrcSize); + +void ZSTD_resetSeqStore(seqStore_t* ssPtr); + +/*! ZSTD_getCParamsFromCDict() : + * as the name implies */ +ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict); + +/* ZSTD_compressBegin_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. 
*/ +size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_dictContentType_e dictContentType, + ZSTD_dictTableLoadMethod_e dtlm, + const ZSTD_CDict* cdict, + const ZSTD_CCtx_params* params, + unsigned long long pledgedSrcSize); + +/* ZSTD_compress_advanced_internal() : + * Private use only. To be called from zstdmt_compress.c. */ +size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + const ZSTD_CCtx_params* params); + + +/* ZSTD_writeLastEmptyBlock() : + * output an empty Block with end-of-frame mark to complete a frame + * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h)) + * or an error code if `dstCapacity` is too small ( 1 */ +U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat); + +#endif /* ZSTD_COMPRESS_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.c b/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.c new file mode 100644 index 0000000000..17e7168d89 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.c @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + /*-************************************* + * Dependencies + ***************************************/ +#include "zstd_compress_literals.h" + +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, ""); + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); + break; + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); + break; + default: /* not necessary : flSize is {1,2,3} */ + assert(0); + } + + memcpy(ostart + flSize, src, srcSize); + DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize)); + return srcSize + flSize; +} + +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); + break; + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); + break; + default: /* not necessary : flSize is {1,2,3} */ + assert(0); + } + + ostart[flSize] = *(const BYTE*)src; + DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1); + return flSize+1; +} + +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, int disableLiteralCompression, + void* dst, size_t dstCapacity, + const void* src, size_t 
srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const int bmi2) +{ + size_t const minGain = ZSTD_minGain(srcSize, strategy); + size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); + BYTE* const ostart = (BYTE*)dst; + U32 singleStream = srcSize < 256; + symbolEncodingType_e hType = set_compressed; + size_t cLitSize; + + DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)", + disableLiteralCompression, (U32)srcSize); + + /* Prepare nextEntropy assuming reusing the existing table */ + memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + + if (disableLiteralCompression) + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + + /* small ? don't even attempt compression (speed opt) */ +# define COMPRESS_LITERALS_SIZE_MIN 63 + { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN; + if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + + RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression"); + { HUF_repeat repeat = prevHuf->repeatMode; + int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0; + if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; + cLitSize = singleStream ? 
+ HUF_compress1X_repeat( + ostart+lhSize, dstCapacity-lhSize, src, srcSize, + HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, + (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) : + HUF_compress4X_repeat( + ostart+lhSize, dstCapacity-lhSize, src, srcSize, + HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, + (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); + if (repeat != HUF_repeat_none) { + /* reused the existing table */ + DEBUGLOG(5, "Reusing previous huffman table"); + hType = set_repeat; + } + } + + if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) { + memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + if (cLitSize==1) { + memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } + + if (hType == set_compressed) { + /* using a newly constructed table */ + nextHuf->repeatMode = HUF_repeat_check; + } + + /* Build header */ + switch(lhSize) + { + case 3: /* 2 - 2 - 10 - 10 */ + { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + MEM_writeLE24(ostart, lhc); + break; + } + case 4: /* 2 - 2 - 14 - 14 */ + { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); + MEM_writeLE32(ostart, lhc); + break; + } + case 5: /* 2 - 2 - 18 - 18 */ + { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (BYTE)(cLitSize >> 10); + break; + } + default: /* not possible : lhSize is {3,4,5} */ + assert(0); + } + DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize)); + return lhSize+cLitSize; +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.h b/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.h new file mode 100644 index 0000000000..8b08705743 --- 
/dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_literals.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMPRESS_LITERALS_H +#define ZSTD_COMPRESS_LITERALS_H + +#include "zstd_compress_internal.h" /* ZSTD_hufCTables_t, ZSTD_minGain() */ + + +size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); + +size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_strategy strategy, int disableLiteralCompression, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + void* entropyWorkspace, size_t entropyWorkspaceSize, + const int bmi2); + +#endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.c b/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.c new file mode 100644 index 0000000000..f9f8097c83 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.c @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + /*-************************************* + * Dependencies + ***************************************/ +#include "zstd_compress_sequences.h" + +/** + * -log2(x / 256) lookup table for x in [0, 256). + * If x == 0: Return 0 + * Else: Return floor(-log2(x / 256) * 256) + */ +static unsigned const kInverseProbabilityLog256[256] = { + 0, 2048, 1792, 1642, 1536, 1453, 1386, 1329, 1280, 1236, 1197, 1162, + 1130, 1100, 1073, 1047, 1024, 1001, 980, 960, 941, 923, 906, 889, + 874, 859, 844, 830, 817, 804, 791, 779, 768, 756, 745, 734, + 724, 714, 704, 694, 685, 676, 667, 658, 650, 642, 633, 626, + 618, 610, 603, 595, 588, 581, 574, 567, 561, 554, 548, 542, + 535, 529, 523, 517, 512, 506, 500, 495, 489, 484, 478, 473, + 468, 463, 458, 453, 448, 443, 438, 434, 429, 424, 420, 415, + 411, 407, 402, 398, 394, 390, 386, 382, 377, 373, 370, 366, + 362, 358, 354, 350, 347, 343, 339, 336, 332, 329, 325, 322, + 318, 315, 311, 308, 305, 302, 298, 295, 292, 289, 286, 282, + 279, 276, 273, 270, 267, 264, 261, 258, 256, 253, 250, 247, + 244, 241, 239, 236, 233, 230, 228, 225, 222, 220, 217, 215, + 212, 209, 207, 204, 202, 199, 197, 194, 192, 190, 187, 185, + 182, 180, 178, 175, 173, 171, 168, 166, 164, 162, 159, 157, + 155, 153, 151, 149, 146, 144, 142, 140, 138, 136, 134, 132, + 130, 128, 126, 123, 121, 119, 117, 115, 114, 112, 110, 108, + 106, 104, 102, 100, 98, 96, 94, 93, 91, 89, 87, 85, + 83, 82, 80, 78, 76, 74, 73, 71, 69, 67, 66, 64, + 62, 61, 59, 57, 55, 54, 52, 50, 49, 47, 46, 44, + 42, 41, 39, 37, 36, 34, 33, 31, 30, 28, 26, 25, + 23, 22, 20, 19, 17, 16, 14, 13, 11, 10, 8, 7, + 5, 4, 2, 1, +}; + +static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) { + void const* ptr = ctable; + U16 const* u16ptr = (U16 const*)ptr; + U32 const maxSymbolValue = MEM_read16(u16ptr + 1); + return maxSymbolValue; +} + +/** + * Returns the cost in bytes of encoding the normalized count header. + * Returns an error if any of the helper functions return an error. 
+ */ +static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max, + size_t const nbSeq, unsigned const FSELog) +{ + BYTE wksp[FSE_NCOUNTBOUND]; + S16 norm[MaxSeq + 1]; + const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max), ""); + return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog); +} + +/** + * Returns the cost in bits of encoding the distribution described by count + * using the entropy bound. + */ +static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t const total) +{ + unsigned cost = 0; + unsigned s; + for (s = 0; s <= max; ++s) { + unsigned norm = (unsigned)((256 * count[s]) / total); + if (count[s] != 0 && norm == 0) + norm = 1; + assert(count[s] < total); + cost += count[s] * kInverseProbabilityLog256[norm]; + } + return cost >> 8; +} + +/** + * Returns the cost in bits of encoding the distribution in count using ctable. + * Returns an error if ctable cannot represent all the symbols in count. 
+ */ +size_t ZSTD_fseBitCost( + FSE_CTable const* ctable, + unsigned const* count, + unsigned const max) +{ + unsigned const kAccuracyLog = 8; + size_t cost = 0; + unsigned s; + FSE_CState_t cstate; + FSE_initCState(&cstate, ctable); + if (ZSTD_getFSEMaxSymbolValue(ctable) < max) { + DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u", + ZSTD_getFSEMaxSymbolValue(ctable), max); + return ERROR(GENERIC); + } + for (s = 0; s <= max; ++s) { + unsigned const tableLog = cstate.stateLog; + unsigned const badCost = (tableLog + 1) << kAccuracyLog; + unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog); + if (count[s] == 0) + continue; + if (bitCost >= badCost) { + DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s); + return ERROR(GENERIC); + } + cost += (size_t)count[s] * bitCost; + } + return cost >> kAccuracyLog; +} + +/** + * Returns the cost in bits of encoding the distribution in count using the + * table described by norm. The max symbol support by norm is assumed >= max. + * norm must be valid for every symbol with non-zero probability in count. + */ +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, + unsigned const* count, unsigned const max) +{ + unsigned const shift = 8 - accuracyLog; + size_t cost = 0; + unsigned s; + assert(accuracyLog <= 8); + for (s = 0; s <= max; ++s) { + unsigned const normAcc = (norm[s] != -1) ? 
(unsigned)norm[s] : 1; + unsigned const norm256 = normAcc << shift; + assert(norm256 > 0); + assert(norm256 < 256); + cost += count[s] * kInverseProbabilityLog256[norm256]; + } + return cost >> 8; +} + +symbolEncodingType_e +ZSTD_selectEncodingType( + FSE_repeat* repeatMode, unsigned const* count, unsigned const max, + size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, + FSE_CTable const* prevCTable, + short const* defaultNorm, U32 defaultNormLog, + ZSTD_defaultPolicy_e const isDefaultAllowed, + ZSTD_strategy const strategy) +{ + ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0); + if (mostFrequent == nbSeq) { + *repeatMode = FSE_repeat_none; + if (isDefaultAllowed && nbSeq <= 2) { + /* Prefer set_basic over set_rle when there are 2 or less symbols, + * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol. + * If basic encoding isn't possible, always choose RLE. + */ + DEBUGLOG(5, "Selected set_basic"); + return set_basic; + } + DEBUGLOG(5, "Selected set_rle"); + return set_rle; + } + if (strategy < ZSTD_lazy) { + if (isDefaultAllowed) { + size_t const staticFse_nbSeq_max = 1000; + size_t const mult = 10 - strategy; + size_t const baseLog = 3; + size_t const dynamicFse_nbSeq_min = (((size_t)1 << defaultNormLog) * mult) >> baseLog; /* 28-36 for offset, 56-72 for lengths */ + assert(defaultNormLog >= 5 && defaultNormLog <= 6); /* xx_DEFAULTNORMLOG */ + assert(mult <= 9 && mult >= 7); + if ( (*repeatMode == FSE_repeat_valid) + && (nbSeq < staticFse_nbSeq_max) ) { + DEBUGLOG(5, "Selected set_repeat"); + return set_repeat; + } + if ( (nbSeq < dynamicFse_nbSeq_min) + || (mostFrequent < (nbSeq >> (defaultNormLog-1))) ) { + DEBUGLOG(5, "Selected set_basic"); + /* The format allows default tables to be repeated, but it isn't useful. + * When using simple heuristics to select encoding type, we don't want + * to confuse these tables with dictionaries. 
When running more careful + * analysis, we don't need to waste time checking both repeating tables + * and default tables. + */ + *repeatMode = FSE_repeat_none; + return set_basic; + } + } + } else { + size_t const basicCost = isDefaultAllowed ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, count, max) : ERROR(GENERIC); + size_t const repeatCost = *repeatMode != FSE_repeat_none ? ZSTD_fseBitCost(prevCTable, count, max) : ERROR(GENERIC); + size_t const NCountCost = ZSTD_NCountCost(count, max, nbSeq, FSELog); + size_t const compressedCost = (NCountCost << 3) + ZSTD_entropyCost(count, max, nbSeq); + + if (isDefaultAllowed) { + assert(!ZSTD_isError(basicCost)); + assert(!(*repeatMode == FSE_repeat_valid && ZSTD_isError(repeatCost))); + } + assert(!ZSTD_isError(NCountCost)); + assert(compressedCost < ERROR(maxCode)); + DEBUGLOG(5, "Estimated bit costs: basic=%u\trepeat=%u\tcompressed=%u", + (unsigned)basicCost, (unsigned)repeatCost, (unsigned)compressedCost); + if (basicCost <= repeatCost && basicCost <= compressedCost) { + DEBUGLOG(5, "Selected set_basic"); + assert(isDefaultAllowed); + *repeatMode = FSE_repeat_none; + return set_basic; + } + if (repeatCost <= compressedCost) { + DEBUGLOG(5, "Selected set_repeat"); + assert(!ZSTD_isError(repeatCost)); + return set_repeat; + } + assert(compressedCost < basicCost && compressedCost < repeatCost); + } + DEBUGLOG(5, "Selected set_compressed"); + *repeatMode = FSE_repeat_check; + return set_compressed; +} + +size_t +ZSTD_buildCTable(void* dst, size_t dstCapacity, + FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, + unsigned* count, U32 max, + const BYTE* codeTable, size_t nbSeq, + const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, + const FSE_CTable* prevCTable, size_t prevCTableSize, + void* entropyWorkspace, size_t entropyWorkspaceSize) +{ + BYTE* op = (BYTE*)dst; + const BYTE* const oend = op + dstCapacity; + DEBUGLOG(6, "ZSTD_buildCTable (dstCapacity=%u)", (unsigned)dstCapacity); + + switch 
(type) { + case set_rle: + FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), ""); + RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space"); + *op = codeTable[0]; + return 1; + case set_repeat: + memcpy(nextCTable, prevCTable, prevCTableSize); + return 0; + case set_basic: + FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), ""); /* note : could be pre-calculated */ + return 0; + case set_compressed: { + S16 norm[MaxSeq + 1]; + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max); + if (count[codeTable[nbSeq-1]] > 1) { + count[codeTable[nbSeq-1]]--; + nbSeq_1--; + } + assert(nbSeq_1 > 1); + FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max), ""); + { size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog); /* overflow protected */ + FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed"); + FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize), ""); + return NCountSize; + } + } + default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach"); + } +} + +FORCE_INLINE_TEMPLATE size_t +ZSTD_encodeSequences_body( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets) +{ + BIT_CStream_t blockStream; + FSE_CState_t stateMatchLength; + FSE_CState_t stateOffsetBits; + FSE_CState_t stateLitLength; + + RETURN_ERROR_IF( + ERR_isError(BIT_initCStream(&blockStream, dst, dstCapacity)), + dstSize_tooSmall, "not enough space remaining"); + DEBUGLOG(6, "available space for bitstream : %i (dstCapacity=%u)", + (int)(blockStream.endPtr - blockStream.startPtr), + (unsigned)dstCapacity); + + /* first symbols */ + 
FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); + FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); + FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); + BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + if (longOffsets) { + U32 const ofBits = ofCodeTable[nbSeq-1]; + unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); + BIT_flushBits(&blockStream); + } + BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, + ofBits - extraBits); + } else { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); + } + BIT_flushBits(&blockStream); + + { size_t n; + for (n=nbSeq-2 ; n= 64-7-(LLFSELog+MLFSELog+OffFSELog))) + BIT_flushBits(&blockStream); /* (7)*/ + BIT_addBits(&blockStream, sequences[n].litLength, llBits); + if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); + if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); + if (longOffsets) { + unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[n].offset, extraBits); + BIT_flushBits(&blockStream); /* (7)*/ + } + BIT_addBits(&blockStream, sequences[n].offset >> extraBits, + ofBits - extraBits); /* 31 */ + } else { + BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ + } + BIT_flushBits(&blockStream); /* (7)*/ + DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr)); + } } + + DEBUGLOG(6, "ZSTD_encodeSequences: flushing ML state with %u bits", 
stateMatchLength.stateLog); + FSE_flushCState(&blockStream, &stateMatchLength); + DEBUGLOG(6, "ZSTD_encodeSequences: flushing Off state with %u bits", stateOffsetBits.stateLog); + FSE_flushCState(&blockStream, &stateOffsetBits); + DEBUGLOG(6, "ZSTD_encodeSequences: flushing LL state with %u bits", stateLitLength.stateLog); + FSE_flushCState(&blockStream, &stateLitLength); + + { size_t const streamSize = BIT_closeCStream(&blockStream); + RETURN_ERROR_IF(streamSize==0, dstSize_tooSmall, "not enough space"); + return streamSize; + } +} + +static size_t +ZSTD_encodeSequences_default( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets) +{ + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} + + +#if DYNAMIC_BMI2 + +static TARGET_ATTRIBUTE("bmi2") size_t +ZSTD_encodeSequences_bmi2( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets) +{ + return ZSTD_encodeSequences_body(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} + +#endif + +size_t ZSTD_encodeSequences( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2) +{ + DEBUGLOG(5, 
"ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity); +#if DYNAMIC_BMI2 + if (bmi2) { + return ZSTD_encodeSequences_bmi2(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); + } +#endif + (void)bmi2; + return ZSTD_encodeSequences_default(dst, dstCapacity, + CTable_MatchLength, mlCodeTable, + CTable_OffsetBits, ofCodeTable, + CTable_LitLength, llCodeTable, + sequences, nbSeq, longOffsets); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.h b/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.h new file mode 100644 index 0000000000..68c6f9a5ac --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_sequences.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_COMPRESS_SEQUENCES_H +#define ZSTD_COMPRESS_SEQUENCES_H + +#include "../common/fse.h" /* FSE_repeat, FSE_CTable */ +#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */ + +typedef enum { + ZSTD_defaultDisallowed = 0, + ZSTD_defaultAllowed = 1 +} ZSTD_defaultPolicy_e; + +symbolEncodingType_e +ZSTD_selectEncodingType( + FSE_repeat* repeatMode, unsigned const* count, unsigned const max, + size_t const mostFrequent, size_t nbSeq, unsigned const FSELog, + FSE_CTable const* prevCTable, + short const* defaultNorm, U32 defaultNormLog, + ZSTD_defaultPolicy_e const isDefaultAllowed, + ZSTD_strategy const strategy); + +size_t +ZSTD_buildCTable(void* dst, size_t dstCapacity, + FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type, + unsigned* count, U32 max, + const BYTE* codeTable, size_t nbSeq, + const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax, + const FSE_CTable* prevCTable, size_t prevCTableSize, + void* entropyWorkspace, size_t entropyWorkspaceSize); + +size_t ZSTD_encodeSequences( + void* dst, size_t dstCapacity, + FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, + FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable, + FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable, + seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2); + +size_t ZSTD_fseBitCost( + FSE_CTable const* ctable, + unsigned const* count, + unsigned const max); + +size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog, + unsigned const* count, unsigned const max); +#endif /* ZSTD_COMPRESS_SEQUENCES_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.c b/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.c new file mode 100644 index 0000000000..b693866c0a --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.c @@ -0,0 +1,845 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + /*-************************************* + * Dependencies + ***************************************/ +#include "zstd_compress_superblock.h" + +#include "../common/zstd_internal.h" /* ZSTD_getSequenceLength */ +#include "hist.h" /* HIST_countFast_wksp */ +#include "zstd_compress_internal.h" +#include "zstd_compress_sequences.h" +#include "zstd_compress_literals.h" + +/*-************************************* +* Superblock entropy buffer structs +***************************************/ +/** ZSTD_hufCTablesMetadata_t : + * Stores Literals Block Type for a super-block in hType, and + * huffman tree description in hufDesBuffer. + * hufDesSize refers to the size of huffman tree description in bytes. + * This metadata is populated in ZSTD_buildSuperBlockEntropy_literal() */ +typedef struct { + symbolEncodingType_e hType; + BYTE hufDesBuffer[500]; /* TODO give name to this value */ + size_t hufDesSize; +} ZSTD_hufCTablesMetadata_t; + +/** ZSTD_fseCTablesMetadata_t : + * Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and + * fse tables in fseTablesBuffer. + * fseTablesSize refers to the size of fse tables in bytes. + * This metadata is populated in ZSTD_buildSuperBlockEntropy_sequences() */ +typedef struct { + symbolEncodingType_e llType; + symbolEncodingType_e ofType; + symbolEncodingType_e mlType; + BYTE fseTablesBuffer[500]; /* TODO give name to this value */ + size_t fseTablesSize; + size_t lastCountSize; /* This is to account for bug in 1.3.4. 
More detail in ZSTD_compressSubBlock_sequences() */ +} ZSTD_fseCTablesMetadata_t; + +typedef struct { + ZSTD_hufCTablesMetadata_t hufMetadata; + ZSTD_fseCTablesMetadata_t fseMetadata; +} ZSTD_entropyCTablesMetadata_t; + + +/** ZSTD_buildSuperBlockEntropy_literal() : + * Builds entropy for the super-block literals. + * Stores literals block type (raw, rle, compressed, repeat) and + * huffman description table to hufMetadata. + * @return : size of huffman description table or error code */ +static size_t ZSTD_buildSuperBlockEntropy_literal(void* const src, size_t srcSize, + const ZSTD_hufCTables_t* prevHuf, + ZSTD_hufCTables_t* nextHuf, + ZSTD_hufCTablesMetadata_t* hufMetadata, + const int disableLiteralsCompression, + void* workspace, size_t wkspSize) +{ + BYTE* const wkspStart = (BYTE*)workspace; + BYTE* const wkspEnd = wkspStart + wkspSize; + BYTE* const countWkspStart = wkspStart; + unsigned* const countWksp = (unsigned*)workspace; + const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned); + BYTE* const nodeWksp = countWkspStart + countWkspSize; + const size_t nodeWkspSize = wkspEnd-nodeWksp; + unsigned maxSymbolValue = 255; + unsigned huffLog = HUF_TABLELOG_DEFAULT; + HUF_repeat repeat = prevHuf->repeatMode; + + DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_literal (srcSize=%zu)", srcSize); + + /* Prepare nextEntropy assuming reusing the existing table */ + memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + + if (disableLiteralsCompression) { + DEBUGLOG(5, "set_basic - disabled"); + hufMetadata->hType = set_basic; + return 0; + } + + /* small ? don't even attempt compression (speed opt) */ +# define COMPRESS_LITERALS_SIZE_MIN 63 + { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 
6 : COMPRESS_LITERALS_SIZE_MIN; + if (srcSize <= minLitSize) { + DEBUGLOG(5, "set_basic - too small"); + hufMetadata->hType = set_basic; + return 0; + } + } + + /* Scan input and build symbol stats */ + { size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)src, srcSize, workspace, wkspSize); + FORWARD_IF_ERROR(largest, "HIST_count_wksp failed"); + if (largest == srcSize) { + DEBUGLOG(5, "set_rle"); + hufMetadata->hType = set_rle; + return 0; + } + if (largest <= (srcSize >> 7)+4) { + DEBUGLOG(5, "set_basic - no gain"); + hufMetadata->hType = set_basic; + return 0; + } + } + + /* Validate the previous Huffman table */ + if (repeat == HUF_repeat_check && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) { + repeat = HUF_repeat_none; + } + + /* Build Huffman Tree */ + memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable)); + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp, + maxSymbolValue, huffLog, + nodeWksp, nodeWkspSize); + FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp"); + huffLog = (U32)maxBits; + { /* Build and write the CTable */ + size_t const newCSize = HUF_estimateCompressedSize( + (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue); + size_t const hSize = HUF_writeCTable( + hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer), + (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog); + /* Check against repeating the previous CTable */ + if (repeat != HUF_repeat_none) { + size_t const oldCSize = HUF_estimateCompressedSize( + (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue); + if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) { + DEBUGLOG(5, "set_repeat - smaller"); + memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_repeat; + return 0; + } + } + if (newCSize + hSize >= srcSize) { + DEBUGLOG(5, "set_basic - no gains"); + 
memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); + hufMetadata->hType = set_basic; + return 0; + } + DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize); + hufMetadata->hType = set_compressed; + nextHuf->repeatMode = HUF_repeat_check; + return hSize; + } + } +} + +/** ZSTD_buildSuperBlockEntropy_sequences() : + * Builds entropy for the super-block sequences. + * Stores symbol compression modes and fse table to fseMetadata. + * @return : size of fse tables or error code */ +static size_t ZSTD_buildSuperBlockEntropy_sequences(seqStore_t* seqStorePtr, + const ZSTD_fseCTables_t* prevEntropy, + ZSTD_fseCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize) +{ + BYTE* const wkspStart = (BYTE*)workspace; + BYTE* const wkspEnd = wkspStart + wkspSize; + BYTE* const countWkspStart = wkspStart; + unsigned* const countWksp = (unsigned*)workspace; + const size_t countWkspSize = (MaxSeq + 1) * sizeof(unsigned); + BYTE* const cTableWksp = countWkspStart + countWkspSize; + const size_t cTableWkspSize = wkspEnd-cTableWksp; + ZSTD_strategy const strategy = cctxParams->cParams.strategy; + FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable; + FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable; + FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable; + const BYTE* const ofCodeTable = seqStorePtr->ofCode; + const BYTE* const llCodeTable = seqStorePtr->llCode; + const BYTE* const mlCodeTable = seqStorePtr->mlCode; + size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + BYTE* const ostart = fseMetadata->fseTablesBuffer; + BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer); + BYTE* op = ostart; + + assert(cTableWkspSize >= (1 << MaxFSELog) * sizeof(FSE_FUNCTION_TYPE)); + DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy_sequences (nbSeq=%zu)", nbSeq); + memset(workspace, 0, wkspSize); + + fseMetadata->lastCountSize = 0; + /* convert length/distances into 
codes */ + ZSTD_seqToCodes(seqStorePtr); + /* build CTable for Literal Lengths */ + { U32 LLtype; + unsigned max = MaxLL; + size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, llCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ + DEBUGLOG(5, "Building LL table"); + nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode; + LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode, + countWksp, max, mostFrequent, nbSeq, + LLFSELog, prevEntropy->litlengthCTable, + LL_defaultNorm, LL_defaultNormLog, + ZSTD_defaultAllowed, strategy); + assert(set_basic < set_compressed && set_rle < set_compressed); + assert(!(LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_LitLength, LLFSELog, (symbolEncodingType_e)LLtype, + countWksp, max, llCodeTable, nbSeq, LL_defaultNorm, LL_defaultNormLog, MaxLL, + prevEntropy->litlengthCTable, sizeof(prevEntropy->litlengthCTable), + cTableWksp, cTableWkspSize); + FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for LitLens failed"); + if (LLtype == set_compressed) + fseMetadata->lastCountSize = countSize; + op += countSize; + fseMetadata->llType = (symbolEncodingType_e) LLtype; + } } + /* build CTable for Offsets */ + { U32 Offtype; + unsigned max = MaxOff; + size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, ofCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ + /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */ + ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? 
ZSTD_defaultAllowed : ZSTD_defaultDisallowed; + DEBUGLOG(5, "Building OF table"); + nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode; + Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode, + countWksp, max, mostFrequent, nbSeq, + OffFSELog, prevEntropy->offcodeCTable, + OF_defaultNorm, OF_defaultNormLog, + defaultPolicy, strategy); + assert(!(Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)Offtype, + countWksp, max, ofCodeTable, nbSeq, OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff, + prevEntropy->offcodeCTable, sizeof(prevEntropy->offcodeCTable), + cTableWksp, cTableWkspSize); + FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for Offsets failed"); + if (Offtype == set_compressed) + fseMetadata->lastCountSize = countSize; + op += countSize; + fseMetadata->ofType = (symbolEncodingType_e) Offtype; + } } + /* build CTable for MatchLengths */ + { U32 MLtype; + unsigned max = MaxML; + size_t const mostFrequent = HIST_countFast_wksp(countWksp, &max, mlCodeTable, nbSeq, workspace, wkspSize); /* can't fail */ + DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op)); + nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode; + MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode, + countWksp, max, mostFrequent, nbSeq, + MLFSELog, prevEntropy->matchlengthCTable, + ML_defaultNorm, ML_defaultNormLog, + ZSTD_defaultAllowed, strategy); + assert(!(MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */ + { size_t const countSize = ZSTD_buildCTable(op, oend - op, CTable_MatchLength, MLFSELog, (symbolEncodingType_e)MLtype, + countWksp, max, mlCodeTable, nbSeq, ML_defaultNorm, ML_defaultNormLog, MaxML, + prevEntropy->matchlengthCTable, sizeof(prevEntropy->matchlengthCTable), 
+ cTableWksp, cTableWkspSize); + FORWARD_IF_ERROR(countSize, "ZSTD_buildCTable for MatchLengths failed"); + if (MLtype == set_compressed) + fseMetadata->lastCountSize = countSize; + op += countSize; + fseMetadata->mlType = (symbolEncodingType_e) MLtype; + } } + assert((size_t) (op-ostart) <= sizeof(fseMetadata->fseTablesBuffer)); + return op-ostart; +} + + +/** ZSTD_buildSuperBlockEntropy() : + * Builds entropy for the super-block. + * @return : 0 on success or error code */ +static size_t +ZSTD_buildSuperBlockEntropy(seqStore_t* seqStorePtr, + const ZSTD_entropyCTables_t* prevEntropy, + ZSTD_entropyCTables_t* nextEntropy, + const ZSTD_CCtx_params* cctxParams, + ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize) +{ + size_t const litSize = seqStorePtr->lit - seqStorePtr->litStart; + DEBUGLOG(5, "ZSTD_buildSuperBlockEntropy"); + entropyMetadata->hufMetadata.hufDesSize = + ZSTD_buildSuperBlockEntropy_literal(seqStorePtr->litStart, litSize, + &prevEntropy->huf, &nextEntropy->huf, + &entropyMetadata->hufMetadata, + ZSTD_disableLiteralsCompression(cctxParams), + workspace, wkspSize); + FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildSuperBlockEntropy_literal failed"); + entropyMetadata->fseMetadata.fseTablesSize = + ZSTD_buildSuperBlockEntropy_sequences(seqStorePtr, + &prevEntropy->fse, &nextEntropy->fse, + cctxParams, + &entropyMetadata->fseMetadata, + workspace, wkspSize); + FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildSuperBlockEntropy_sequences failed"); + return 0; +} + +/** ZSTD_compressSubBlock_literal() : + * Compresses literals section for a sub-block. + * When we have to write the Huffman table we will sometimes choose a header + * size larger than necessary. This is because we have to pick the header size + * before we know the table size + compressed size, so we have a bound on the + * table size. If we guessed incorrectly, we fall back to uncompressed literals. 
+ * + * We write the header when writeEntropy=1 and set entropyWrriten=1 when we succeeded + * in writing the header, otherwise it is set to 0. + * + * hufMetadata->hType has literals block type info. + * If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block. + * If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block. + * If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block + * If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block + * and the following sub-blocks' literals sections will be Treeless_Literals_Block. + * @return : compressed size of literals section of a sub-block + * Or 0 if it unable to compress. + * Or error code */ +static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + const BYTE* literals, size_t litSize, + void* dst, size_t dstSize, + const int bmi2, int writeEntropy, int* entropyWritten) +{ + size_t const header = writeEntropy ? 200 : 0; + size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header)); + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart + lhSize; + U32 const singleStream = lhSize == 3; + symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat; + size_t cLitSize = 0; + + (void)bmi2; /* TODO bmi2... 
*/ + + DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy); + + *entropyWritten = 0; + if (litSize == 0 || hufMetadata->hType == set_basic) { + DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal"); + return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); + } else if (hufMetadata->hType == set_rle) { + DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal"); + return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize); + } + + assert(litSize > 0); + assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat); + + if (writeEntropy && hufMetadata->hType == set_compressed) { + memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize); + op += hufMetadata->hufDesSize; + cLitSize += hufMetadata->hufDesSize; + DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize); + } + + /* TODO bmi2 */ + { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable) + : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable); + op += cSize; + cLitSize += cSize; + if (cSize == 0 || ERR_isError(cSize)) { + DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize)); + return 0; + } + /* If we expand and we aren't writing a header then emit uncompressed */ + if (!writeEntropy && cLitSize >= litSize) { + DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible"); + return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); + } + /* If we are writing headers then allow expansion that doesn't change our header size. 
*/ + if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) { + assert(cLitSize > litSize); + DEBUGLOG(5, "Literals expanded beyond allowed header size"); + return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize); + } + DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize); + } + + /* Build header */ + switch(lhSize) + { + case 3: /* 2 - 2 - 10 - 10 */ + { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14); + MEM_writeLE24(ostart, lhc); + break; + } + case 4: /* 2 - 2 - 14 - 14 */ + { U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18); + MEM_writeLE32(ostart, lhc); + break; + } + case 5: /* 2 - 2 - 18 - 18 */ + { U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (BYTE)(cLitSize >> 10); + break; + } + default: /* not possible : lhSize is {3,4,5} */ + assert(0); + } + *entropyWritten = 1; + DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart)); + return op-ostart; +} + +static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) { + const seqDef* const sstart = sequences; + const seqDef* const send = sequences + nbSeq; + const seqDef* sp = sstart; + size_t matchLengthSum = 0; + size_t litLengthSum = 0; + while (send-sp > 0) { + ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); + litLengthSum += seqLen.litLength; + matchLengthSum += seqLen.matchLength; + sp++; + } + assert(litLengthSum <= litSize); + if (!lastSequence) { + assert(litLengthSum == litSize); + } + return matchLengthSum + litSize; +} + +/** ZSTD_compressSubBlock_sequences() : + * Compresses sequences section for a sub-block. + * fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have + * symbol compression modes for the super-block. + * The first successfully compressed block will have these in its header. 
+ * We set entropyWritten=1 when we succeed in compressing the sequences. + * The following sub-blocks will always have repeat mode. + * @return : compressed size of sequences section of a sub-block + * Or 0 if it is unable to compress + * Or error code. */ +static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + const seqDef* sequences, size_t nbSeq, + const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + const int bmi2, int writeEntropy, int* entropyWritten) +{ + const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + BYTE* seqHead; + + DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets); + + *entropyWritten = 0; + /* Sequences Header */ + RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/, + dstSize_tooSmall, ""); + if (nbSeq < 0x7F) + *op++ = (BYTE)nbSeq; + else if (nbSeq < LONGNBSEQ) + op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; + else + op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; + if (nbSeq==0) { + return op - ostart; + } + + /* seqHead : flags for FSE encoding type */ + seqHead = op++; + + DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart)); + + if (writeEntropy) { + const U32 LLtype = fseMetadata->llType; + const U32 Offtype = fseMetadata->ofType; + const U32 MLtype = fseMetadata->mlType; + DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize); + *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); + memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize); + op += fseMetadata->fseTablesSize; + } else { + const U32 repeat = set_repeat; + *seqHead = 
(BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2)); + } + + { size_t const bitstreamSize = ZSTD_encodeSequences( + op, oend - op, + fseTables->matchlengthCTable, mlCode, + fseTables->offcodeCTable, ofCode, + fseTables->litlengthCTable, llCode, + sequences, nbSeq, + longOffsets, bmi2); + FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed"); + op += bitstreamSize; + /* zstd versions <= 1.3.4 mistakenly report corruption when + * FSE_readNCount() receives a buffer < 4 bytes. + * Fixed by https://github.com/facebook/zstd/pull/1146. + * This can happen when the last set_compressed table present is 2 + * bytes and the bitstream is only one byte. + * In this exceedingly rare case, we will simply emit an uncompressed + * block, since it isn't worth optimizing. + */ +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) { + /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */ + assert(fseMetadata->lastCountSize + bitstreamSize == 3); + DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by " + "emitting an uncompressed block."); + return 0; + } +#endif + DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize); + } + + /* zstd versions <= 1.4.0 mistakenly report error when + * sequences section body size is less than 3 bytes. + * Fixed by https://github.com/facebook/zstd/pull/1664. + * This can happen when the previous sequences section block is compressed + * with rle mode and the current block's sequences section is compressed + * with repeat mode where sequences section body size can be 1 byte. 
+ */ +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + if (op-seqHead < 4) { + DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting " + "an uncompressed block when sequences are < 4 bytes"); + return 0; + } +#endif + + *entropyWritten = 1; + return op - ostart; +} + +/** ZSTD_compressSubBlock() : + * Compresses a single sub-block. + * @return : compressed size of the sub-block + * Or 0 if it failed to compress. */ +static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + const seqDef* sequences, size_t nbSeq, + const BYTE* literals, size_t litSize, + const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + const int bmi2, + int writeLitEntropy, int writeSeqEntropy, + int* litEntropyWritten, int* seqEntropyWritten, + U32 lastBlock) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart + ZSTD_blockHeaderSize; + DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)", + litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock); + { size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable, + &entropyMetadata->hufMetadata, literals, litSize, + op, oend-op, bmi2, writeLitEntropy, litEntropyWritten); + FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed"); + if (cLitSize == 0) return 0; + op += cLitSize; + } + { size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse, + &entropyMetadata->fseMetadata, + sequences, nbSeq, + llCode, mlCode, ofCode, + cctxParams, + op, oend-op, + bmi2, writeSeqEntropy, seqEntropyWritten); + FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed"); + if (cSeqSize == 0) return 0; + op += cSeqSize; + } + /* Write block header */ + { size_t cSize = (op-ostart)-ZSTD_blockHeaderSize; + U32 const cBlockHeader24 
= lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(ostart, cBlockHeader24); + } + return op-ostart; +} + +static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize, + const ZSTD_hufCTables_t* huf, + const ZSTD_hufCTablesMetadata_t* hufMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) +{ + unsigned* const countWksp = (unsigned*)workspace; + unsigned maxSymbolValue = 255; + size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ + + if (hufMetadata->hType == set_basic) return litSize; + else if (hufMetadata->hType == set_rle) return 1; + else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) { + size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize); + if (ZSTD_isError(largest)) return litSize; + { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue); + if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize; + return cLitSizeEstimate + literalSectionHeaderSize; + } } + assert(0); /* impossible */ + return 0; +} + +static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, + const BYTE* codeTable, unsigned maxCode, + size_t nbSeq, const FSE_CTable* fseCTable, + const U32* additionalBits, + short const* defaultNorm, U32 defaultNormLog, + void* workspace, size_t wkspSize) +{ + unsigned* const countWksp = (unsigned*)workspace; + const BYTE* ctp = codeTable; + const BYTE* const ctStart = ctp; + const BYTE* const ctEnd = ctStart + nbSeq; + size_t cSymbolTypeSizeEstimateInBits = 0; + unsigned max = maxCode; + + HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */ + if (type == set_basic) { + cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max); + } else if (type == set_rle) { + cSymbolTypeSizeEstimateInBits = 0; + } else if (type 
== set_compressed || type == set_repeat) { + cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max); + } + if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10; + while (ctp < ctEnd) { + if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp]; + else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */ + ctp++; + } + return cSymbolTypeSizeEstimateInBits / 8; +} + +static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_fseCTables_t* fseTables, + const ZSTD_fseCTablesMetadata_t* fseMetadata, + void* workspace, size_t wkspSize, + int writeEntropy) +{ + size_t sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */ + size_t cSeqSizeEstimate = 0; + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff, + nbSeq, fseTables->offcodeCTable, NULL, + OF_defaultNorm, OF_defaultNormLog, + workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL, + nbSeq, fseTables->litlengthCTable, LL_bits, + LL_defaultNorm, LL_defaultNormLog, + workspace, wkspSize); + cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML, + nbSeq, fseTables->matchlengthCTable, ML_bits, + ML_defaultNorm, ML_defaultNormLog, + workspace, wkspSize); + if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize; + return cSeqSizeEstimate + sequencesSectionHeaderSize; +} + +static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize, + const BYTE* ofCodeTable, + const BYTE* llCodeTable, + const BYTE* mlCodeTable, + size_t nbSeq, + const ZSTD_entropyCTables_t* entropy, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + void* workspace, size_t wkspSize, + int writeLitEntropy, int writeSeqEntropy) { + size_t cSizeEstimate = 
0; + cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize, + &entropy->huf, &entropyMetadata->hufMetadata, + workspace, wkspSize, writeLitEntropy); + cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable, + nbSeq, &entropy->fse, &entropyMetadata->fseMetadata, + workspace, wkspSize, writeSeqEntropy); + return cSizeEstimate + ZSTD_blockHeaderSize; +} + +static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata) +{ + if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle) + return 1; + if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle) + return 1; + if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle) + return 1; + return 0; +} + +/** ZSTD_compressSubBlock_multi() : + * Breaks super-block into multiple sub-blocks and compresses them. + * Entropy will be written to the first block. + * The following blocks will use repeat mode to compress. + * All sub-blocks are compressed blocks (no raw or rle blocks). + * @return : compressed size of the super block (which is multiple ZSTD blocks) + * Or 0 if it failed to compress. 
*/ +static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, + const ZSTD_compressedBlockState_t* prevCBlock, + ZSTD_compressedBlockState_t* nextCBlock, + const ZSTD_entropyCTablesMetadata_t* entropyMetadata, + const ZSTD_CCtx_params* cctxParams, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const int bmi2, U32 lastBlock, + void* workspace, size_t wkspSize) +{ + const seqDef* const sstart = seqStorePtr->sequencesStart; + const seqDef* const send = seqStorePtr->sequences; + const seqDef* sp = sstart; + const BYTE* const lstart = seqStorePtr->litStart; + const BYTE* const lend = seqStorePtr->lit; + const BYTE* lp = lstart; + BYTE const* ip = (BYTE const*)src; + BYTE const* const iend = ip + srcSize; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + const BYTE* llCodePtr = seqStorePtr->llCode; + const BYTE* mlCodePtr = seqStorePtr->mlCode; + const BYTE* ofCodePtr = seqStorePtr->ofCode; + size_t targetCBlockSize = cctxParams->targetCBlockSize; + size_t litSize, seqCount; + int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed; + int writeSeqEntropy = 1; + int lastSequence = 0; + + DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)", + (unsigned)(lend-lp), (unsigned)(send-sstart)); + + litSize = 0; + seqCount = 0; + do { + size_t cBlockSizeEstimate = 0; + if (sstart == send) { + lastSequence = 1; + } else { + const seqDef* const sequence = sp + seqCount; + lastSequence = sequence == send - 1; + litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength; + seqCount++; + } + if (lastSequence) { + assert(lp <= lend); + assert(litSize <= (size_t)(lend - lp)); + litSize = (size_t)(lend - lp); + } + /* I think there is an optimization opportunity here. + * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful + * since it recalculates estimate from scratch. 
+ * For example, it would recount literal distribution and symbol codes everytime. + */ + cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount, + &nextCBlock->entropy, entropyMetadata, + workspace, wkspSize, writeLitEntropy, writeSeqEntropy); + if (cBlockSizeEstimate > targetCBlockSize || lastSequence) { + int litEntropyWritten = 0; + int seqEntropyWritten = 0; + const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence); + const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata, + sp, seqCount, + lp, litSize, + llCodePtr, mlCodePtr, ofCodePtr, + cctxParams, + op, oend-op, + bmi2, writeLitEntropy, writeSeqEntropy, + &litEntropyWritten, &seqEntropyWritten, + lastBlock && lastSequence); + FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed"); + if (cSize > 0 && cSize < decompressedSize) { + DEBUGLOG(5, "Committed the sub-block"); + assert(ip + decompressedSize <= iend); + ip += decompressedSize; + sp += seqCount; + lp += litSize; + op += cSize; + llCodePtr += seqCount; + mlCodePtr += seqCount; + ofCodePtr += seqCount; + litSize = 0; + seqCount = 0; + /* Entropy only needs to be written once */ + if (litEntropyWritten) { + writeLitEntropy = 0; + } + if (seqEntropyWritten) { + writeSeqEntropy = 0; + } + } + } + } while (!lastSequence); + if (writeLitEntropy) { + DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten"); + memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf)); + } + if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) { + /* If we haven't written our entropy tables, then we've violated our contract and + * must emit an uncompressed block. 
+ */ + DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten"); + return 0; + } + if (ip < iend) { + size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock); + DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip)); + FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed"); + assert(cSize != 0); + op += cSize; + /* We have to regenerate the repcodes because we've skipped some sequences */ + if (sp < send) { + seqDef const* seq; + repcodes_t rep; + memcpy(&rep, prevCBlock->rep, sizeof(rep)); + for (seq = sstart; seq < sp; ++seq) { + rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); + } + memcpy(nextCBlock->rep, &rep, sizeof(rep)); + } + } + DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed"); + return op-ostart; +} + +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + void const* src, size_t srcSize, + unsigned lastBlock) { + ZSTD_entropyCTablesMetadata_t entropyMetadata; + + FORWARD_IF_ERROR(ZSTD_buildSuperBlockEntropy(&zc->seqStore, + &zc->blockState.prevCBlock->entropy, + &zc->blockState.nextCBlock->entropy, + &zc->appliedParams, + &entropyMetadata, + zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); + + return ZSTD_compressSubBlock_multi(&zc->seqStore, + zc->blockState.prevCBlock, + zc->blockState.nextCBlock, + &entropyMetadata, + &zc->appliedParams, + dst, dstCapacity, + src, srcSize, + zc->bmi2, lastBlock, + zc->entropyWorkspace, HUF_WORKSPACE_SIZE /* statically allocated in resetCCtx */); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.h b/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.h new file mode 100644 index 0000000000..07f4cb1dc6 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_compress_superblock.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, 
Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_COMPRESS_ADVANCED_H +#define ZSTD_COMPRESS_ADVANCED_H + +/*-************************************* +* Dependencies +***************************************/ + +#include "../zstd.h" /* ZSTD_CCtx */ + +/*-************************************* +* Target Compressed Block Size +***************************************/ + +/* ZSTD_compressSuperBlock() : + * Used to compress a super block when targetCBlockSize is being used. + * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */ +size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + void const* src, size_t srcSize, + unsigned lastBlock); + +#endif /* ZSTD_COMPRESS_ADVANCED_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_cwksp.h b/src/borg/algorithms/zstd/lib/compress/zstd_cwksp.h new file mode 100644 index 0000000000..a25c9263b7 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_cwksp.h @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_CWKSP_H +#define ZSTD_CWKSP_H + +/*-************************************* +* Dependencies +***************************************/ +#include "../common/zstd_internal.h" + +#if defined (__cplusplus) +extern "C" { +#endif + +/*-************************************* +* Constants +***************************************/ + +/* Since the workspace is effectively its own little malloc implementation / + * arena, when we run under ASAN, we should similarly insert redzones between + * each internal element of the workspace, so ASAN will catch overruns that + * reach outside an object but that stay inside the workspace. + * + * This defines the size of that redzone. + */ +#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE +#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128 +#endif + +/*-************************************* +* Structures +***************************************/ +typedef enum { + ZSTD_cwksp_alloc_objects, + ZSTD_cwksp_alloc_buffers, + ZSTD_cwksp_alloc_aligned +} ZSTD_cwksp_alloc_phase_e; + +/** + * Zstd fits all its internal datastructures into a single continuous buffer, + * so that it only needs to perform a single OS allocation (or so that a buffer + * can be provided to it and it can perform no allocations at all). This buffer + * is called the workspace. + * + * Several optimizations complicate that process of allocating memory ranges + * from this workspace for each internal datastructure: + * + * - These different internal datastructures have different setup requirements: + * + * - The static objects need to be cleared once and can then be trivially + * reused for each compression. + * + * - Various buffers don't need to be initialized at all--they are always + * written into before they're read. 
+ * + * - The matchstate tables have a unique requirement that they don't need + * their memory to be totally cleared, but they do need the memory to have + * some bound, i.e., a guarantee that all values in the memory they've been + * allocated is less than some maximum value (which is the starting value + * for the indices that they will then use for compression). When this + * guarantee is provided to them, they can use the memory without any setup + * work. When it can't, they have to clear the area. + * + * - These buffers also have different alignment requirements. + * + * - We would like to reuse the objects in the workspace for multiple + * compressions without having to perform any expensive reallocation or + * reinitialization work. + * + * - We would like to be able to efficiently reuse the workspace across + * multiple compressions **even when the compression parameters change** and + * we need to resize some of the objects (where possible). + * + * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp + * abstraction was created. It works as follows: + * + * Workspace Layout: + * + * [ ... workspace ... ] + * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers] + * + * The various objects that live in the workspace are divided into the + * following categories, and are allocated separately: + * + * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict, + * so that literally everything fits in a single buffer. Note: if present, + * this must be the first object in the workspace, since ZSTD_free{CCtx, + * CDict}() rely on a pointer comparison to see whether one or two frees are + * required. + * + * - Fixed size objects: these are fixed-size, fixed-count objects that are + * nonetheless "dynamically" allocated in the workspace so that we can + * control how they're initialized separately from the broader ZSTD_CCtx. 
+ * Examples: + * - Entropy Workspace + * - 2 x ZSTD_compressedBlockState_t + * - CDict dictionary contents + * + * - Tables: these are any of several different datastructures (hash tables, + * chain tables, binary trees) that all respect a common format: they are + * uint32_t arrays, all of whose values are between 0 and (nextSrc - base). + * Their sizes depend on the cparams. + * + * - Aligned: these buffers are used for various purposes that require 4 byte + * alignment, but don't require any initialization before they're used. + * + * - Buffers: these buffers are used for various purposes that don't require + * any alignment or initialization before they're used. This means they can + * be moved around at no cost for a new compression. + * + * Allocating Memory: + * + * The various types of objects must be allocated in order, so they can be + * correctly packed into the workspace buffer. That order is: + * + * 1. Objects + * 2. Buffers + * 3. Aligned + * 4. Tables + * + * Attempts to reserve objects of different types out of order will fail. + */ +typedef struct { + void* workspace; + void* workspaceEnd; + + void* objectEnd; + void* tableEnd; + void* tableValidEnd; + void* allocStart; + + int allocFailed; + int workspaceOversizedDuration; + ZSTD_cwksp_alloc_phase_e phase; +} ZSTD_cwksp; + +/*-************************************* +* Functions +***************************************/ + +MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws); + +MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) { + (void)ws; + assert(ws->workspace <= ws->objectEnd); + assert(ws->objectEnd <= ws->tableEnd); + assert(ws->objectEnd <= ws->tableValidEnd); + assert(ws->tableEnd <= ws->allocStart); + assert(ws->tableValidEnd <= ws->allocStart); + assert(ws->allocStart <= ws->workspaceEnd); +} + +/** + * Align must be a power of 2. 
+ */ +MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) { + size_t const mask = align - 1; + assert((align & mask) == 0); + return (size + mask) & ~mask; +} + +/** + * Use this to determine how much space in the workspace we will consume to + * allocate this object. (Normally it should be exactly the size of the object, + * but under special conditions, like ASAN, where we pad each object, it might + * be larger.) + * + * Since tables aren't currently redzoned, you don't need to call through this + * to figure out how much space you need for the matchState tables. Everything + * else is though. + */ +MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) { +#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; +#else + return size; +#endif +} + +MEM_STATIC void ZSTD_cwksp_internal_advance_phase( + ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) { + assert(phase >= ws->phase); + if (phase > ws->phase) { + if (ws->phase < ZSTD_cwksp_alloc_buffers && + phase >= ZSTD_cwksp_alloc_buffers) { + ws->tableValidEnd = ws->objectEnd; + } + if (ws->phase < ZSTD_cwksp_alloc_aligned && + phase >= ZSTD_cwksp_alloc_aligned) { + /* If unaligned allocations down from a too-large top have left us + * unaligned, we need to realign our alloc ptr. Technically, this + * can consume space that is unaccounted for in the neededSpace + * calculation. However, I believe this can only happen when the + * workspace is too large, and specifically when it is too large + * by a larger margin than the space that will be consumed. */ + /* TODO: cleaner, compiler warning friendly way to do this??? */ + ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1)); + if (ws->allocStart < ws->tableValidEnd) { + ws->tableValidEnd = ws->allocStart; + } + } + ws->phase = phase; + } +} + +/** + * Returns whether this object/buffer/etc was allocated in this workspace. 
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+}
+
+/**
+ * Internal function. Do not use directly.
+ * Reserves `bytes` from the top of the workspace (allocStart moves down
+ * towards tableEnd). On exhaustion, sets ws->allocFailed and returns NULL.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal(
+        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+    void* alloc;
+    void* bottom = ws->tableEnd;
+    ZSTD_cwksp_internal_advance_phase(ws, phase);
+    alloc = (BYTE *)ws->allocStart - bytes;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* over-reserve space */
+    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#endif
+
+    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(alloc >= bottom);
+    if (alloc < bottom) {
+        DEBUGLOG(4, "cwksp: alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    if (alloc < ws->tableValidEnd) {
+        ws->tableValidEnd = alloc;
+    }
+    ws->allocStart = alloc;
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+     * either side. */
+    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    __asan_unpoison_memory_region(alloc, bytes);
+#endif
+
+    return alloc;
+}
+
+/**
+ * Reserves and returns unaligned memory.
+ */
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+}
+
+/**
+ * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+    assert((bytes & (sizeof(U32)-1)) == 0);
+    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+}
+
+/**
+ * Aligned on sizeof(unsigned).
These buffers have the special property that + * their values remain constrained, allowing us to re-use them without + * memset()-ing them. + */ +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { + const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned; + void* alloc = ws->tableEnd; + void* end = (BYTE *)alloc + bytes; + void* top = ws->allocStart; + + DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining", + alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes); + assert((bytes & (sizeof(U32)-1)) == 0); + ZSTD_cwksp_internal_advance_phase(ws, phase); + ZSTD_cwksp_assert_internal_consistency(ws); + assert(end <= top); + if (end > top) { + DEBUGLOG(4, "cwksp: table alloc failed!"); + ws->allocFailed = 1; + return NULL; + } + ws->tableEnd = end; + +#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + __asan_unpoison_memory_region(alloc, bytes); +#endif + + return alloc; +} + +/** + * Aligned on sizeof(void*). + */ +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { + size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); + void* alloc = ws->objectEnd; + void* end = (BYTE*)alloc + roundedBytes; + +#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + /* over-reserve space */ + end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; +#endif + + DEBUGLOG(5, + "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", + alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); + assert(((size_t)alloc & (sizeof(void*)-1)) == 0); + assert((bytes & (sizeof(void*)-1)) == 0); + ZSTD_cwksp_assert_internal_consistency(ws); + /* we must be in the first phase, no advance is possible */ + if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { + DEBUGLOG(4, "cwksp: object alloc failed!"); + ws->allocFailed = 1; + return NULL; + } + ws->objectEnd = end; + ws->tableEnd = end; + ws->tableValidEnd = end; 
+
+#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+     * either side. */
+    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    __asan_unpoison_memory_region(alloc, bytes);
+#endif
+
+    return alloc;
+}
+
+/**
+ * Marks the table space as dirty: resets tableValidEnd back to objectEnd,
+ * so the tables must be cleaned (zeroed) before their contents can be
+ * trusted again.
+ */
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
+#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+    /* To validate that the table re-use logic is sound, and that we don't
+     * access table space that we haven't cleaned, we re-"poison" the table
+     * space every time we mark it dirty. */
+    {
+        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
+        __msan_poison(ws->objectEnd, size);
+    }
+#endif
+
+    assert(ws->tableValidEnd >= ws->objectEnd);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    ws->tableValidEnd = ws->objectEnd;
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * Marks all currently allocated table space as clean: advances tableValidEnd
+ * up to tableEnd.
+ */
+MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+    assert(ws->tableValidEnd >= ws->objectEnd);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    if (ws->tableValidEnd < ws->tableEnd) {
+        ws->tableValidEnd = ws->tableEnd;
+    }
+    ZSTD_cwksp_assert_internal_consistency(ws);
+}
+
+/**
+ * Zero the part of the allocated tables not already marked clean.
+ */
+MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+    assert(ws->tableValidEnd >= ws->objectEnd);
+    assert(ws->tableValidEnd <= ws->allocStart);
+    if (ws->tableValidEnd < ws->tableEnd) {
+        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+    }
+    ZSTD_cwksp_mark_tables_clean(ws);
+}
+
+/**
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */ +MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) { + DEBUGLOG(4, "cwksp: clearing tables!"); + +#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + { + size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd; + __asan_poison_memory_region(ws->objectEnd, size); + } +#endif + + ws->tableEnd = ws->objectEnd; + ZSTD_cwksp_assert_internal_consistency(ws); +} + +/** + * Invalidates all buffer, aligned, and table allocations. + * Object allocations remain valid. + */ +MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) { + DEBUGLOG(4, "cwksp: clearing!"); + +#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) + /* To validate that the context re-use logic is sound, and that we don't + * access stuff that this compression hasn't initialized, we re-"poison" + * the workspace (or at least the non-static, non-table parts of it) + * every time we start a new compression. */ + { + size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd; + __msan_poison(ws->tableValidEnd, size); + } +#endif + +#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) + { + size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd; + __asan_poison_memory_region(ws->objectEnd, size); + } +#endif + + ws->tableEnd = ws->objectEnd; + ws->allocStart = ws->workspaceEnd; + ws->allocFailed = 0; + if (ws->phase > ZSTD_cwksp_alloc_buffers) { + ws->phase = ZSTD_cwksp_alloc_buffers; + } + ZSTD_cwksp_assert_internal_consistency(ws); +} + +/** + * The provided workspace takes ownership of the buffer [start, start+size). + * Any existing values in the workspace are ignored (the previously managed + * buffer, if present, must be separately freed). 
+ */ +MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) { + DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size); + assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */ + ws->workspace = start; + ws->workspaceEnd = (BYTE*)start + size; + ws->objectEnd = ws->workspace; + ws->tableValidEnd = ws->objectEnd; + ws->phase = ZSTD_cwksp_alloc_objects; + ZSTD_cwksp_clear(ws); + ws->workspaceOversizedDuration = 0; + ZSTD_cwksp_assert_internal_consistency(ws); +} + +MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) { + void* workspace = ZSTD_malloc(size, customMem); + DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size); + RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!"); + ZSTD_cwksp_init(ws, workspace, size); + return 0; +} + +MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) { + void *ptr = ws->workspace; + DEBUGLOG(4, "cwksp: freeing workspace"); + memset(ws, 0, sizeof(ZSTD_cwksp)); + ZSTD_free(ptr, customMem); +} + +/** + * Moves the management of a workspace from one cwksp to another. The src cwksp + * is left in an invalid state (src must be re-init()'ed before its used again). 
+ */
+MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+    *dst = *src;
+    memset(src, 0, sizeof(ZSTD_cwksp));
+}
+
+/** Total size (in bytes) of the buffer this workspace manages. */
+MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+}
+
+/** Returns non-zero if any reservation since the last clear has failed. */
+MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+    return ws->allocFailed;
+}
+
+/*-*************************************
+* Functions Checking Free Space
+***************************************/
+
+MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+}
+
+/** Returns non-zero if at least additionalNeededSpace bytes are still free. */
+MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+}
+
+/** Returns non-zero if the free space exceeds the need by the
+ *  ZSTD_WORKSPACETOOLARGE_FACTOR margin, i.e. the workspace is oversized. */
+MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    return ZSTD_cwksp_check_available(
+        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+}
+
+/** Returns non-zero once the workspace has stayed oversized for more than
+ *  ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive bumps. */
+MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+}
+
+/** Advances (or resets) the oversized-duration counter used by
+ *  ZSTD_cwksp_check_wasteful(). */
+MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+        ws->workspaceOversizedDuration++;
+    } else {
+        ws->workspaceOversizedDuration = 0;
+    }
+}
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_CWKSP_H */
diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.c b/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.c
new file mode 100644
index 0000000000..27eed66cfe
--- /dev/null
+++ b/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_compress_internal.h" +#include "zstd_double_fast.h" + + +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashLarge = ms->hashTable; + U32 const hBitsL = cParams->hashLog; + U32 const mls = cParams->minMatch; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; + + /* Always insert every fastHashFillStep position into the hash tables. + * Insert the other positions into the large hash table if their entry + * is empty. 
+ */ + for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) { + U32 const current = (U32)(ip - base); + U32 i; + for (i = 0; i < fastHashFillStep; ++i) { + size_t const smHash = ZSTD_hashPtr(ip + i, hBitsS, mls); + size_t const lgHash = ZSTD_hashPtr(ip + i, hBitsL, 8); + if (i == 0) + hashSmall[smHash] = current + i; + if (i == 0 || hashLarge[lgHash] == 0) + hashLarge[lgHash] = current + i; + /* Only load extra positions for ZSTD_dtlm_full */ + if (dtlm == ZSTD_dtlm_fast) + break; + } } +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_doubleFast_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, + U32 const mls /* template */, ZSTD_dictMode_e const dictMode) +{ + ZSTD_compressionParameters const* cParams = &ms->cParams; + U32* const hashLong = ms->hashTable; + const U32 hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + const U32 hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + const BYTE* const prefixLowest = base + prefixLowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_compressionParameters* const dictCParams = + dictMode == ZSTD_dictMatchState ? + &dms->cParams : NULL; + const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ? + dms->hashTable : NULL; + const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ? 
+ dms->chainTable : NULL; + const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ? + dms->window.dictLimit : 0; + const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? + dms->window.base : NULL; + const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ? + dictBase + dictStartIndex : NULL; + const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? + dms->window.nextSrc : NULL; + const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? + prefixLowestIndex - (U32)(dictEnd - dictBase) : + 0; + const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ? + dictCParams->hashLog : hBitsL; + const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ? + dictCParams->chainLog : hBitsS; + const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic"); + + assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState); + + /* if a dictionary is attached, it must be within window range */ + if (dictMode == ZSTD_dictMatchState) { + assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); + } + + /* init */ + ip += (dictAndPrefixLength == 0); + if (dictMode == ZSTD_noDict) { + U32 const current = (U32)(ip - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + U32 const maxRep = current - windowLow; + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + if (dictMode == ZSTD_dictMatchState) { + /* dictMatchState repCode checks don't currently handle repCode == 0 + * disabling. 
*/ + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + U32 offset; + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); + size_t const dictHL = ZSTD_hashPtr(ip, dictHBitsL, 8); + size_t const dictHS = ZSTD_hashPtr(ip, dictHBitsS, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndexL = hashLong[h2]; + U32 matchIndexS = hashSmall[h]; + const BYTE* matchLong = base + matchIndexL; + const BYTE* match = base + matchIndexS; + const U32 repIndex = current + 1 - offset_1; + const BYTE* repMatch = (dictMode == ZSTD_dictMatchState + && repIndex < prefixLowestIndex) ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + hashLong[h2] = hashSmall[h] = current; /* update hash tables */ + + /* check dictMatchState repcode */ + if (dictMode == ZSTD_dictMatchState + && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + goto _match_stored; + } + + /* check noDict repcode */ + if ( dictMode == ZSTD_noDict + && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + goto _match_stored; + } + + if (matchIndexL > prefixLowestIndex) { + /* check prefix long match */ + if (MEM_read64(matchLong) == MEM_read64(ip)) { + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + goto _match_found; + } + } else if (dictMode == ZSTD_dictMatchState) { + /* check dictMatchState long match */ + U32 const dictMatchIndexL = dictHashLong[dictHL]; + const BYTE* dictMatchL = dictBase + dictMatchIndexL; + assert(dictMatchL < dictEnd); + + if (dictMatchL > dictStart && MEM_read64(dictMatchL) == MEM_read64(ip)) { + mLength = ZSTD_count_2segments(ip+8, dictMatchL+8, iend, dictEnd, prefixLowest) + 8; + offset = (U32)(current - dictMatchIndexL - dictIndexDelta); + while (((ip>anchor) & (dictMatchL>dictStart)) && (ip[-1] == dictMatchL[-1])) { ip--; dictMatchL--; mLength++; } /* catch up */ + goto _match_found; + } } + + if (matchIndexS > prefixLowestIndex) { + /* check prefix short match */ + if (MEM_read32(match) == MEM_read32(ip)) { + goto _search_next_long; + } + } else if (dictMode == ZSTD_dictMatchState) { + /* check dictMatchState short match */ + U32 const dictMatchIndexS = dictHashSmall[dictHS]; + match = dictBase + dictMatchIndexS; + matchIndexS = dictMatchIndexS + dictIndexDelta; + + if (match > dictStart && MEM_read32(match) == MEM_read32(ip)) { + goto _search_next_long; + } } + + ip 
+= ((ip-anchor) >> kSearchStrength) + 1; +#if defined(__aarch64__) + PREFETCH_L1(ip+256); +#endif + continue; + +_search_next_long: + + { size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + size_t const dictHLNext = ZSTD_hashPtr(ip+1, dictHBitsL, 8); + U32 const matchIndexL3 = hashLong[hl3]; + const BYTE* matchL3 = base + matchIndexL3; + hashLong[hl3] = current + 1; + + /* check prefix long +1 match */ + if (matchIndexL3 > prefixLowestIndex) { + if (MEM_read64(matchL3) == MEM_read64(ip+1)) { + mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8; + ip++; + offset = (U32)(ip-matchL3); + while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ + goto _match_found; + } + } else if (dictMode == ZSTD_dictMatchState) { + /* check dict long +1 match */ + U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; + const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; + assert(dictMatchL3 < dictEnd); + if (dictMatchL3 > dictStart && MEM_read64(dictMatchL3) == MEM_read64(ip+1)) { + mLength = ZSTD_count_2segments(ip+1+8, dictMatchL3+8, iend, dictEnd, prefixLowest) + 8; + ip++; + offset = (U32)(current + 1 - dictMatchIndexL3 - dictIndexDelta); + while (((ip>anchor) & (dictMatchL3>dictStart)) && (ip[-1] == dictMatchL3[-1])) { ip--; dictMatchL3--; mLength++; } /* catch up */ + goto _match_found; + } } } + + /* if no long +1 match, explore the short match we found */ + if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) { + mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; + offset = (U32)(current - matchIndexS); + while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } else { + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip - match); + while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + + /* fall-through */ + +_match_found: + 
offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + +_match_stored: + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = current+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } + + /* check immediate repcode */ + if (dictMode == ZSTD_dictMatchState) { + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState + && repIndex2 < prefixLowestIndex ? + dictBase + repIndex2 - dictIndexDelta : + base + repIndex2; + if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } + + if (dictMode == ZSTD_noDict) { + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + } /* while (ip < ilimit) */ + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? 
offset_2 : offsetSaved; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_doubleFast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + const U32 mls = ms->cParams.minMatch; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict); + case 5 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict); + case 6 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict); + case 7 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict); + } +} + + +size_t ZSTD_compressBlock_doubleFast_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + const U32 mls = ms->cParams.minMatch; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState); + case 5 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState); + case 6 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState); + case 7 : + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState); + } +} + + +static size_t ZSTD_compressBlock_doubleFast_extDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, + U32 const mls /* template */) +{ + ZSTD_compressionParameters const* cParams = &ms->cParams; + U32* const hashLong = ms->hashTable; + U32 const hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + U32 const hBitsS = cParams->chainLog; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip 
= istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); + const U32 dictStartIndex = lowLimit; + const U32 dictLimit = ms->window.dictLimit; + const U32 prefixStartIndex = (dictLimit > lowLimit) ? dictLimit : lowLimit; + const BYTE* const prefixStart = base + prefixStartIndex; + const BYTE* const dictBase = ms->window.dictBase; + const BYTE* const dictStart = dictBase + dictStartIndex; + const BYTE* const dictEnd = dictBase + prefixStartIndex; + U32 offset_1=rep[0], offset_2=rep[1]; + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_extDict_generic (srcSize=%zu)", srcSize); + + /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ + if (prefixStartIndex == dictStartIndex) + return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict); + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 matchIndex = hashSmall[hSmall]; + const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + + const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); + const U32 matchLongIndex = hashLong[hLong]; + const BYTE* const matchLongBase = matchLongIndex < prefixStartIndex ? dictBase : base; + const BYTE* matchLong = matchLongBase + matchLongIndex; + + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* const repBase = repIndex < prefixStartIndex ? 
dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + size_t mLength; + hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ + + if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ + & (repIndex > dictStartIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + } else { + if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { + const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; + const BYTE* const lowMatchPtr = matchLongIndex < prefixStartIndex ? dictStart : prefixStart; + U32 offset; + mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, prefixStart) + 8; + offset = current - matchLongIndex; + while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* const match3Base = matchIndex3 < prefixStartIndex ? dictBase : base; + const BYTE* match3 = match3Base + matchIndex3; + U32 offset; + hashLong[h3] = current + 1; + if ( (matchIndex3 > dictStartIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + const BYTE* const matchEnd = matchIndex3 < prefixStartIndex ? dictEnd : iend; + const BYTE* const lowMatchPtr = matchIndex3 < prefixStartIndex ? 
dictStart : prefixStart; + mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, prefixStart) + 8; + ip++; + offset = current+1 - matchIndex3; + while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend; + const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; + offset = current - matchIndex; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else { + ip += ((ip-anchor) >> kSearchStrength) + 1; + continue; + } } + + /* move to next sequence start */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = current+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } + + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ + & (repIndex2 > dictStartIndex)) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + rep[0] = offset_1; + rep[1] = offset_2; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_doubleFast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + U32 const mls = ms->cParams.minMatch; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + } +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.h b/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.h new file mode 100644 index 0000000000..14d944d69b --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_double_fast.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_DOUBLE_FAST_H +#define ZSTD_DOUBLE_FAST_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "../common/mem.h" /* U32 */ +#include "zstd_compress_internal.h" /* ZSTD_CCtx, size_t */ + +void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm); +size_t ZSTD_compressBlock_doubleFast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_doubleFast_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_doubleFast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_DOUBLE_FAST_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_fast.c b/src/borg/algorithms/zstd/lib/compress/zstd_fast.c new file mode 100644 index 0000000000..85a3a7a91e --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_fast.c @@ -0,0 +1,496 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ +#include "zstd_fast.h" + + +void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + const void* const end, + ZSTD_dictTableLoadMethod_e dtlm) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hBits = cParams->hashLog; + U32 const mls = cParams->minMatch; + const BYTE* const base = ms->window.base; + const BYTE* ip = base + ms->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const U32 fastHashFillStep = 3; + + /* Always insert every fastHashFillStep position into the hash table. + * Insert the other positions if their hash entry is empty. + */ + for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) { + U32 const current = (U32)(ip - base); + size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls); + hashTable[hash0] = current; + if (dtlm == ZSTD_dtlm_fast) continue; + /* Only load extra positions for ZSTD_dtlm_full */ + { U32 p; + for (p = 1; p < fastHashFillStep; ++p) { + size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls); + if (hashTable[hash] == 0) { /* not yet filled */ + hashTable[hash] = current + p; + } } } } +} + + +FORCE_INLINE_TEMPLATE size_t +ZSTD_compressBlock_fast_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, + U32 const mls) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hlog = cParams->hashLog; + /* support stepSize of 0 */ + size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */ + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* anchor = istart; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + const U32 prefixStartIndex = 
ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + const BYTE* const prefixStart = base + prefixStartIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + /* init */ + DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); + ip0 += (ip0 == prefixStart); + ip1 = ip0 + 1; + { U32 const current = (U32)(ip0 - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + U32 const maxRep = current - windowLow; + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ +#ifdef __INTEL_COMPILER + /* From intel 'The vector pragma indicates that the loop should be + * vectorized if it is legal to do so'. Can be used together with + * #pragma ivdep (but have opted to exclude that because intel + * warns against using it).*/ + #pragma vector always +#endif + while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */ + size_t mLength; + BYTE const* ip2 = ip0 + 2; + size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls); + U32 const val0 = MEM_read32(ip0); + size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls); + U32 const val1 = MEM_read32(ip1); + U32 const current0 = (U32)(ip0-base); + U32 const current1 = (U32)(ip1-base); + U32 const matchIndex0 = hashTable[h0]; + U32 const matchIndex1 = hashTable[h1]; + BYTE const* repMatch = ip2 - offset_1; + const BYTE* match0 = base + matchIndex0; + const BYTE* match1 = base + matchIndex1; + U32 offcode; + +#if defined(__aarch64__) + PREFETCH_L1(ip0+256); +#endif + + hashTable[h0] = current0; /* update hash table */ + hashTable[h1] = current1; /* update hash table */ + + assert(ip0 + 1 == ip1); + + if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) { + mLength = (ip2[-1] == repMatch[-1]) ? 
1 : 0; + ip0 = ip2 - mLength; + match0 = repMatch - mLength; + mLength += 4; + offcode = 0; + goto _match; + } + if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) { + /* found a regular match */ + goto _offset; + } + if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) { + /* found a regular match after one literal */ + ip0 = ip1; + match0 = match1; + goto _offset; + } + { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize; + assert(step >= 2); + ip0 += step; + ip1 += step; + continue; + } +_offset: /* Requires: ip0, match0 */ + /* Compute the offset code */ + offset_2 = offset_1; + offset_1 = (U32)(ip0-match0); + offcode = offset_1 + ZSTD_REP_MOVE; + mLength = 4; + /* Count the backwards match length */ + while (((ip0>anchor) & (match0>prefixStart)) + && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */ + +_match: /* Requires: ip0, match0, offcode */ + /* Count the forward length */ + mLength += ZSTD_count(ip0+mLength, match0+mLength, iend); + ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH); + /* match found */ + ip0 += mLength; + anchor = ip0; + + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */ + while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); + ip0 += rLength; + ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH); + anchor 
= ip0; + continue; /* faster when present (confirmed on gcc-8) ... (?) */ + } } } + ip1 = ip0 + 1; + } + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? offset_2 : offsetSaved; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_fast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + U32 const mls = ms->cParams.minMatch; + assert(ms->dictMatchState == NULL); + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7); + } +} + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_fast_dictMatchState_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hlog = cParams->hashLog; + /* support stepSize of 0 */ + U32 const stepSize = cParams->targetLength + !(cParams->targetLength); + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 prefixStartIndex = ms->window.dictLimit; + const BYTE* const prefixStart = base + prefixStartIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + const ZSTD_matchState_t* const dms = ms->dictMatchState; + const ZSTD_compressionParameters* const dictCParams = &dms->cParams ; + const U32* const dictHashTable = dms->hashTable; 
+ const U32 dictStartIndex = dms->window.dictLimit; + const BYTE* const dictBase = dms->window.base; + const BYTE* const dictStart = dictBase + dictStartIndex; + const BYTE* const dictEnd = dms->window.nextSrc; + const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase); + const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart); + const U32 dictHLog = dictCParams->hashLog; + + /* if a dictionary is still attached, it necessarily means that + * it is within window size. So we just check it. */ + const U32 maxDistance = 1U << cParams->windowLog; + const U32 endIndex = (U32)((size_t)(ip - base) + srcSize); + assert(endIndex - prefixStartIndex <= maxDistance); + (void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ + + /* ensure there will be no no underflow + * when translating a dict index into a local index */ + assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); + + /* init */ + DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic"); + ip += (dictAndPrefixLength == 0); + /* dictMatchState repCode checks don't currently handle repCode == 0 + * disabling. */ + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h = ZSTD_hashPtr(ip, hlog, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndex = hashTable[h]; + const BYTE* match = base + matchIndex; + const U32 repIndex = current + 1 - offset_1; + const BYTE* repMatch = (repIndex < prefixStartIndex) ? 
+ dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + hashTable[h] = current; /* update hash table */ + + if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */ + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + } else if ( (matchIndex <= prefixStartIndex) ) { + size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); + U32 const dictMatchIndex = dictHashTable[dictHash]; + const BYTE* dictMatch = dictBase + dictMatchIndex; + if (dictMatchIndex <= dictStartIndex || + MEM_read32(dictMatch) != MEM_read32(ip)) { + assert(stepSize >= 1); + ip += ((ip-anchor) >> kSearchStrength) + stepSize; + continue; + } else { + /* found a dict match */ + U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta); + mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4; + while (((ip>anchor) & (dictMatch>dictStart)) + && (ip[-1] == dictMatch[-1])) { + ip--; dictMatch--; mLength++; + } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + } else if (MEM_read32(match) != MEM_read32(ip)) { + /* it's not a match, and we're not going to check the dictionary */ + assert(stepSize >= 1); + ip += ((ip-anchor) >> kSearchStrength) + stepSize; + continue; + } else { + /* found a regular match */ + U32 const offset = (U32)(ip-match); + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + while (((ip>anchor) & (match>prefixStart)) + && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + 
ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + assert(base+current+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? + dictBase - dictIndexDelta + repIndex2 : + base + repIndex2; + if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); + hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } + } + } + + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? 
offset_2 : offsetSaved; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + +size_t ZSTD_compressBlock_fast_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + U32 const mls = ms->cParams.minMatch; + assert(ms->dictMatchState != NULL); + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); + } +} + + +static size_t ZSTD_compressBlock_fast_extDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hlog = cParams->hashLog; + /* support stepSize of 0 */ + U32 const stepSize = cParams->targetLength + !(cParams->targetLength); + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog); + const U32 dictStartIndex = lowLimit; + const BYTE* const dictStart = dictBase + dictStartIndex; + const U32 dictLimit = ms->window.dictLimit; + const U32 prefixStartIndex = dictLimit < lowLimit ? 
lowLimit : dictLimit; + const BYTE* const prefixStart = base + prefixStartIndex; + const BYTE* const dictEnd = dictBase + prefixStartIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=rep[0], offset_2=rep[1]; + + DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); + + /* switch to "regular" variant if extDict is invalidated due to maxDistance */ + if (prefixStartIndex == dictStartIndex) + return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls); + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t h = ZSTD_hashPtr(ip, hlog, mls); + const U32 matchIndex = hashTable[h]; + const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; + const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + hashTable[h] = current; /* update hash table */ + DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current); + assert(offset_1 <= current +1); /* check repIndex */ + + if ( (((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > dictStartIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; + size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + } else { + if ( (matchIndex < dictStartIndex) || + (MEM_read32(match) != MEM_read32(ip)) ) { + assert(stepSize >= 1); + ip += ((ip-anchor) >> kSearchStrength) + stepSize; + continue; + } + { const BYTE* const matchEnd = matchIndex < prefixStartIndex ? 
dictEnd : iend; + const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart; + U32 const offset = current - matchIndex; + size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset_2 = offset_1; offset_1 = offset; /* update offset history */ + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ip += mLength; + anchor = ip; + } } + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2; + hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (repIndex2 > dictStartIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; + { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH); + hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + rep[0] = offset_1; + rep[1] = offset_2; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_fast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + U32 const mls = ms->cParams.minMatch; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + case 5 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + case 6 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + case 7 : + return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + } +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_fast.h b/src/borg/algorithms/zstd/lib/compress/zstd_fast.h new file mode 100644 index 0000000000..cf6aaa8e67 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_fast.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_FAST_H +#define ZSTD_FAST_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "../common/mem.h" /* U32 */ +#include "zstd_compress_internal.h" + +void ZSTD_fillHashTable(ZSTD_matchState_t* ms, + void const* end, ZSTD_dictTableLoadMethod_e dtlm); +size_t ZSTD_compressBlock_fast( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_fast_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_fast_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_FAST_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_lazy.c b/src/borg/algorithms/zstd/lib/compress/zstd_lazy.c new file mode 100644 index 0000000000..4cf5c88b53 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_lazy.c @@ -0,0 +1,1138 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_compress_internal.h" +#include "zstd_lazy.h" + + +/*-************************************* +* Binary Tree search +***************************************/ + +static void +ZSTD_updateDUBT(ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* iend, + U32 mls) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hashLog = cParams->hashLog; + + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + + const BYTE* const base = ms->window.base; + U32 const target = (U32)(ip - base); + U32 idx = ms->nextToUpdate; + + if (idx != target) + DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)", + idx, target, ms->window.dictLimit); + assert(ip + 8 <= iend); /* condition for ZSTD_hashPtr */ + (void)iend; + + assert(idx >= ms->window.dictLimit); /* condition for valid base+idx */ + for ( ; idx < target ; idx++) { + size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls); /* assumption : ip + 8 <= iend */ + U32 const matchIndex = hashTable[h]; + + U32* const nextCandidatePtr = bt + 2*(idx&btMask); + U32* const sortMarkPtr = nextCandidatePtr + 1; + + DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx); + hashTable[h] = idx; /* Update Hash Table */ + *nextCandidatePtr = matchIndex; /* update BT like a chain */ + *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK; + } + ms->nextToUpdate = target; +} + + +/** ZSTD_insertDUBT1() : + * sort one already inserted but unsorted position + * assumption : current >= btlow == (current - btmask) + * doesn't fail */ +static void +ZSTD_insertDUBT1(ZSTD_matchState_t* ms, + U32 current, const BYTE* inputEnd, + U32 nbCompares, U32 btLow, + const ZSTD_dictMode_e dictMode) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + 
const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current; + const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 matchIndex = *smallerPtr; /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */ + U32 dummy32; /* to be nullified at the end */ + U32 const windowValid = ms->window.lowLimit; + U32 const maxDistance = 1U << cParams->windowLog; + U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid; + + + DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)", + current, dictLimit, windowLow); + assert(current >= btLow); + assert(ip < iend); /* condition for ZSTD_count */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + assert(matchIndex < current); + /* note : all candidates are now supposed sorted, + * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK + * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */ + + if ( (dictMode != ZSTD_extDict) + || (matchIndex+matchLength >= dictLimit) /* both in current segment*/ + || (current < dictLimit) /* both in extDict */) { + const BYTE* const mBase = ( (dictMode != ZSTD_extDict) + || (matchIndex+matchLength >= dictLimit)) ? 
+ base : dictBase; + assert( (matchIndex+matchLength >= dictLimit) /* might be wrong if extDict is incorrectly set to 0 */ + || (current < dictLimit) ); + match = mBase + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* preparation for next read of match[matchLength] */ + } + + DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ", + current, matchIndex, (U32)matchLength); + + if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ + } + + if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u", + matchIndex, btLow, nextPtr[1]); + smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ + matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u", + matchIndex, btLow, nextPtr[0]); + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; +} + + +static size_t +ZSTD_DUBT_findBetterDictMatch ( + ZSTD_matchState_t* 
ms, + const BYTE* const ip, const BYTE* const iend, + size_t* offsetPtr, + size_t bestLength, + U32 nbCompares, + U32 const mls, + const ZSTD_dictMode_e dictMode) +{ + const ZSTD_matchState_t * const dms = ms->dictMatchState; + const ZSTD_compressionParameters* const dmsCParams = &dms->cParams; + const U32 * const dictHashTable = dms->hashTable; + U32 const hashLog = dmsCParams->hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32 dictMatchIndex = dictHashTable[h]; + + const BYTE* const base = ms->window.base; + const BYTE* const prefixStart = base + ms->window.dictLimit; + U32 const current = (U32)(ip-base); + const BYTE* const dictBase = dms->window.base; + const BYTE* const dictEnd = dms->window.nextSrc; + U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base); + U32 const dictLowLimit = dms->window.lowLimit; + U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit; + + U32* const dictBt = dms->chainTable; + U32 const btLog = dmsCParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? 
dictLowLimit : dictHighLimit - btMask; + + size_t commonLengthSmaller=0, commonLengthLarger=0; + + (void)dictMode; + assert(dictMode == ZSTD_dictMatchState); + + while (nbCompares-- && (dictMatchIndex > dictLowLimit)) { + U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match = dictBase + dictMatchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (dictMatchIndex+matchLength >= dictHighLimit) + match = base + dictMatchIndex + dictIndexDelta; /* to prepare for next usage of match[matchLength] */ + + if (matchLength > bestLength) { + U32 matchIndex = dictMatchIndex + dictIndexDelta; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { + DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", + current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex); + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + } + if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + } + + if (match[matchLength] < ip[matchLength]) { + if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + if (dictMatchIndex <= btLow) { break; } /* beyond tree size, stop the search */ + commonLengthLarger = 
matchLength; + dictMatchIndex = nextPtr[0]; + } + } + + if (bestLength >= MINMATCH) { + U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", + current, (U32)bestLength, (U32)*offsetPtr, mIndex); + } + return bestLength; + +} + + +static size_t +ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, + const BYTE* const ip, const BYTE* const iend, + size_t* offsetPtr, + U32 const mls, + const ZSTD_dictMode_e dictMode) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hashLog = cParams->hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32 matchIndex = hashTable[h]; + + const BYTE* const base = ms->window.base; + U32 const current = (U32)(ip-base); + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog); + + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 const btLow = (btMask >= current) ? 
0 : current - btMask; + U32 const unsortLimit = MAX(btLow, windowLow); + + U32* nextCandidate = bt + 2*(matchIndex&btMask); + U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1; + U32 nbCompares = 1U << cParams->searchLog; + U32 nbCandidates = nbCompares; + U32 previousCandidate = 0; + + DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current); + assert(ip <= iend-8); /* required for h calculation */ + + /* reach end of unsorted candidates list */ + while ( (matchIndex > unsortLimit) + && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK) + && (nbCandidates > 1) ) { + DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted", + matchIndex); + *unsortedMark = previousCandidate; /* the unsortedMark becomes a reversed chain, to move up back to original position */ + previousCandidate = matchIndex; + matchIndex = *nextCandidate; + nextCandidate = bt + 2*(matchIndex&btMask); + unsortedMark = bt + 2*(matchIndex&btMask) + 1; + nbCandidates --; + } + + /* nullify last candidate if it's still unsorted + * simplification, detrimental to compression ratio, beneficial for speed */ + if ( (matchIndex > unsortLimit) + && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) { + DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u", + matchIndex); + *nextCandidate = *unsortedMark = 0; + } + + /* batch sort stacked candidates */ + matchIndex = previousCandidate; + while (matchIndex) { /* will end on matchIndex == 0 */ + U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1; + U32 const nextCandidateIdx = *nextCandidateIdxPtr; + ZSTD_insertDUBT1(ms, matchIndex, iend, + nbCandidates, unsortLimit, dictMode); + matchIndex = nextCandidateIdx; + nbCandidates++; + } + + /* find longest match */ + { size_t commonLengthSmaller = 0, commonLengthLarger = 0; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + U32* smallerPtr = bt 
+ 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current + 8 + 1; + U32 dummy32; /* to be nullified at the end */ + size_t bestLength = 0; + + matchIndex = hashTable[h]; + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ + if (dictMode == ZSTD_dictMatchState) { + nbCompares = 0; /* in addition to avoiding checking any + * further in this loop, make sure we + * skip checking in the dictionary. 
*/ + } + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + if (dictMode == ZSTD_dictMatchState && nbCompares) { + bestLength = ZSTD_DUBT_findBetterDictMatch( + ms, ip, iend, + offsetPtr, bestLength, nbCompares, + mls, dictMode); + } + + assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */ + ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ + if (bestLength >= MINMATCH) { + U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", + current, (U32)bestLength, (U32)*offsetPtr, mIndex); + } + return bestLength; + } +} + + +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ +FORCE_INLINE_TEMPLATE size_t +ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls /* template */, + const ZSTD_dictMode_e dictMode) +{ + DEBUGLOG(7, "ZSTD_BtFindBestMatch"); + if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateDUBT(ms, ip, iLimit, mls); + return ZSTD_DUBT_findBestMatch(ms, ip, 
iLimit, offsetPtr, mls, dictMode); +} + + +static size_t +ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(ms->cParams.minMatch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); + case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); + } +} + + +static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS ( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(ms->cParams.minMatch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); + case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); + } +} + + +static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(ms->cParams.minMatch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); + case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); + } +} + + + +/* ********************************* +* Hash Chain +***********************************/ +#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)] + +/* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +static U32 ZSTD_insertAndFindFirstIndex_internal( + ZSTD_matchState_t* ms, + const ZSTD_compressionParameters* const cParams, + const BYTE* ip, U32 const mls) +{ + U32* const hashTable = ms->hashTable; + const U32 hashLog = cParams->hashLog; + U32* const chainTable = ms->chainTable; + const U32 chainMask = (1 << cParams->chainLog) - 1; + const BYTE* const base = ms->window.base; + const U32 target = (U32)(ip - base); + U32 idx = ms->nextToUpdate; + + while(idx < target) { /* catch up */ + size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); + NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; + hashTable[h] = idx; + idx++; + } + + ms->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; +} + +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { + const ZSTD_compressionParameters* const cParams = &ms->cParams; + return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch); +} + + +/* inlining is important to hardwire a hot branch (template emulation) */ +FORCE_INLINE_TEMPLATE +size_t ZSTD_HcFindBestMatch_generic ( + ZSTD_matchState_t* ms, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 mls, const ZSTD_dictMode_e dictMode) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const chainTable = ms->chainTable; + const U32 chainSize = (1 << cParams->chainLog); + const U32 chainMask = chainSize-1; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 current = (U32)(ip-base); + const U32 maxDistance = 1U << cParams->windowLog; + const U32 lowestValid = ms->window.lowLimit; + const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? 
current - maxDistance : lowestValid; + const U32 isDictionary = (ms->loadedDictEnd != 0); + const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance; + const U32 minChain = current > chainSize ? current - chainSize : 0; + U32 nbAttempts = 1U << cParams->searchLog; + size_t ml=4-1; + + /* HC4 match finder */ + U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls); + + for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + size_t currentMl=0; + if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) { + const BYTE* const match = base + matchIndex; + assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */ + if (match[ml] == ip[ml]) /* potentially better */ + currentMl = ZSTD_count(ip, match, iLimit); + } else { + const BYTE* const match = dictBase + matchIndex; + assert(match+4 <= dictEnd); + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4; + } + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + + if (matchIndex <= minChain) break; + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); + } + + if (dictMode == ZSTD_dictMatchState) { + const ZSTD_matchState_t* const dms = ms->dictMatchState; + const U32* const dmsChainTable = dms->chainTable; + const U32 dmsChainSize = (1 << dms->cParams.chainLog); + const U32 dmsChainMask = dmsChainSize - 1; + const U32 dmsLowestIndex = dms->window.dictLimit; + const BYTE* const dmsBase = dms->window.base; + const BYTE* const dmsEnd = dms->window.nextSrc; + const U32 dmsSize = (U32)(dmsEnd - dmsBase); + const U32 dmsIndexDelta = dictLimit - dmsSize; + const U32 dmsMinChain = dmsSize > dmsChainSize ? 
dmsSize - dmsChainSize : 0; + + matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)]; + + for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) { + size_t currentMl=0; + const BYTE* const match = dmsBase + matchIndex; + assert(match+4 <= dmsEnd); + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4; + + /* save best solution */ + if (currentMl > ml) { + ml = currentMl; + *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE; + if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ + } + + if (matchIndex <= dmsMinChain) break; + matchIndex = dmsChainTable[matchIndex & dmsChainMask]; + } + } + + return ml; +} + + +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(ms->cParams.minMatch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); + case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); + } +} + + +static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS ( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(ms->cParams.minMatch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); + case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); + } +} + + +FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( + 
ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr) +{ + switch(ms->cParams.minMatch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); + case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); + } +} + + +/* ******************************* +* Common parser - lazy strategy +*********************************/ +typedef enum { search_hashChain, search_binaryTree } searchMethod_e; + +FORCE_INLINE_TEMPLATE size_t +ZSTD_compressBlock_lazy_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, + const searchMethod_e searchMethod, const U32 depth, + ZSTD_dictMode_e const dictMode) +{ + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base; + const U32 prefixLowestIndex = ms->window.dictLimit; + const BYTE* const prefixLowest = base + prefixLowestIndex; + + typedef size_t (*searchMax_f)( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); + searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ? + (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS + : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) : + (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS + : ZSTD_HcFindBestMatch_selectMLS); + U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; + + const ZSTD_matchState_t* const dms = ms->dictMatchState; + const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ? + dms->window.dictLimit : 0; + const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? 
+ dms->window.base : NULL; + const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ? + dictBase + dictLowestIndex : NULL; + const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? + dms->window.nextSrc : NULL; + const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? + prefixLowestIndex - (U32)(dictEnd - dictBase) : + 0; + const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest)); + + DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode); + + /* init */ + ip += (dictAndPrefixLength == 0); + if (dictMode == ZSTD_noDict) { + U32 const current = (U32)(ip - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, ms->cParams.windowLog); + U32 const maxRep = current - windowLow; + if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; + if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + } + if (dictMode == ZSTD_dictMatchState) { + /* dictMatchState repCode checks don't currently handle repCode == 0 + * disabling. */ + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); + } + + /* Match Loop */ +#if defined(__GNUC__) && defined(__x86_64__) + /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the + * code alignment is perturbed. To fix the instability align the loop on 32-bytes. + */ + __asm__(".p2align 5"); +#endif + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + + /* check repCode */ + if (dictMode == ZSTD_dictMatchState) { + const U32 repIndex = (U32)(ip - base) + 1 - offset_1; + const BYTE* repMatch = (dictMode == ZSTD_dictMatchState + && repIndex < prefixLowestIndex) ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; + if (depth==0) goto _storeSequence; + } + } + if ( dictMode == ZSTD_noDict + && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { + matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + if (depth==0) goto _storeSequence; + } + + /* first search (depth 0) */ + { size_t offsetFound = 999999999; + size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < 4) { + ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; + int const gain2 = (int)(mlRep * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= 4) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + if (dictMode == ZSTD_dictMatchState) { + const U32 repIndex = (U32)(ip - base) - offset_1; + const BYTE* repMatch = repIndex < prefixLowestIndex ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + && (MEM_read32(repMatch) == MEM_read32(ip)) ) { + const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; + int const gain2 = (int)(mlRep * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= 4) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + } + { size_t offset2=999999999; + size_t const ml2 = searchMax(ms, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; + int const gain2 = (int)(mlRep * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= 4) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + if (dictMode == ZSTD_dictMatchState) { + const U32 repIndex = (U32)(ip - base) - offset_1; + const BYTE* repMatch = repIndex < prefixLowestIndex ? + dictBase + (repIndex - dictIndexDelta) : + base + repIndex; + if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + && (MEM_read32(repMatch) == MEM_read32(ip)) ) { + const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; + size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; + int const gain2 = (int)(mlRep * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= 4) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + } + { size_t offset2=999999999; + size_t const ml2 = searchMax(ms, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* NOTE: + * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. + * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which + * overflows the pointer, which is undefined behavior. + */ + /* catch up */ + if (offset) { + if (dictMode == ZSTD_noDict) { + while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest)) + && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */ + { start--; matchLength++; } + } + if (dictMode == ZSTD_dictMatchState) { + U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; + const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? 
dictLowest : prefixLowest; + while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ + } + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + if (dictMode == ZSTD_dictMatchState) { + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex = current2 - offset_2; + const BYTE* repMatch = dictMode == ZSTD_dictMatchState + && repIndex < prefixLowestIndex ? + dictBase - dictIndexDelta + repIndex : + base + repIndex; + if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */) + && (MEM_read32(repMatch) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; + } + break; + } + } + + if (dictMode == ZSTD_noDict) { + while ( ((ip <= ilimit) & (offset_2>0)) + && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { + /* store sequence */ + matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* Save reps for next block */ + rep[0] = offset_1 ? offset_1 : savedOffset; + rep[1] = offset_2 ? 
offset_2 : savedOffset; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_btlazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_lazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_lazy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_greedy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_lazy_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState); +} + +size_t 
ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState); +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_lazy_extDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, + const searchMethod_e searchMethod, const U32 depth) +{ + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ms->window.dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const dictStart = dictBase + ms->window.lowLimit; + const U32 windowLog = ms->cParams.windowLog; + + typedef size_t (*searchMax_f)( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); + searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS; + + U32 offset_1 = rep[0], offset_2 = rep[1]; + + DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic"); + + /* init */ + ip += (ip == prefixStart); + + /* Match Loop */ +#if defined(__GNUC__) && defined(__x86_64__) + /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the + * code alignment is perturbed. To fix the instability align the loop on 32-bytes. 
+ */ + __asm__(".p2align 5"); +#endif + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + U32 current = (U32)(ip-base); + + /* check repCode */ + { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, current+1, windowLog); + const U32 repIndex = (U32)(current+1 - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ + if (MEM_read32(ip+1) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4; + if (depth==0) goto _storeSequence; + } } + + /* first search (depth 0) */ + { size_t offsetFound = 999999999; + size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < 4) { + ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip= 3) & (repIndex > windowLow)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 1 */ + { size_t offset2=999999999; + size_t const ml2 = searchMax(ms, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip= 3) & (repIndex > windowLow)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + int const gain2 = (int)(repLength * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= 4) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 2 */ + { size_t offset2=999999999; + size_t const ml2 = searchMax(ms, ip, iend, &offset2); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= 4) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + const BYTE* match = (matchIndex < dictLimit) ? 
dictBase + matchIndex : base + matchIndex; + const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; + while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while (ip <= ilimit) { + const U32 repCurrent = (U32)(ip-base); + const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog); + const U32 repIndex = repCurrent - offset_2; + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } + break; + } } + + /* Save reps for next block */ + rep[0] = offset_1; + rep[1] = offset_2; + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_greedy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0); +} + +size_t ZSTD_compressBlock_lazy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1); +} + +size_t ZSTD_compressBlock_lazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2); +} + +size_t ZSTD_compressBlock_btlazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) + +{ + return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_lazy.h b/src/borg/algorithms/zstd/lib/compress/zstd_lazy.h new file mode 100644 index 0000000000..581936f03b --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_lazy.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_LAZY_H +#define ZSTD_LAZY_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "zstd_compress_internal.h" + +U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip); + +void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */ + +size_t ZSTD_compressBlock_btlazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +size_t ZSTD_compressBlock_btlazy2_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_greedy_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +size_t ZSTD_compressBlock_greedy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_lazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + 
void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btlazy2_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_LAZY_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_ldm.c b/src/borg/algorithms/zstd/lib/compress/zstd_ldm.c new file mode 100644 index 0000000000..8c47948358 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_ldm.c @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "zstd_ldm.h" + +#include "../common/debug.h" +#include "zstd_fast.h" /* ZSTD_fillHashTable() */ +#include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */ + +#define LDM_BUCKET_SIZE_LOG 3 +#define LDM_MIN_MATCH_LENGTH 64 +#define LDM_HASH_RLOG 7 +#define LDM_HASH_CHAR_OFFSET 10 + +void ZSTD_ldm_adjustParameters(ldmParams_t* params, + ZSTD_compressionParameters const* cParams) +{ + params->windowLog = cParams->windowLog; + ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX); + DEBUGLOG(4, "ZSTD_ldm_adjustParameters"); + if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG; + if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH; + if (cParams->strategy >= ZSTD_btopt) { + /* Get out of the way of the optimal parser */ + U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength); + assert(minMatch >= ZSTD_LDM_MINMATCH_MIN); + assert(minMatch <= ZSTD_LDM_MINMATCH_MAX); + params->minMatchLength = minMatch; + } + if (params->hashLog == 0) { + params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - 
LDM_HASH_RLOG); + assert(params->hashLog <= ZSTD_HASHLOG_MAX); + } + if (params->hashRateLog == 0) { + params->hashRateLog = params->windowLog < params->hashLog + ? 0 + : params->windowLog - params->hashLog; + } + params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog); +} + +size_t ZSTD_ldm_getTableSize(ldmParams_t params) +{ + size_t const ldmHSize = ((size_t)1) << params.hashLog; + size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog); + size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); + size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); + return params.enableLdm ? totalSize : 0; +} + +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) +{ + return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; +} + +/** ZSTD_ldm_getSmallHash() : + * numBits should be <= 32 + * If numBits==0, returns 0. + * @return : the most significant numBits of value. */ +static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits) +{ + assert(numBits <= 32); + return numBits == 0 ? 0 : (U32)(value >> (64 - numBits)); +} + +/** ZSTD_ldm_getChecksum() : + * numBitsToDiscard should be <= 32 + * @return : the next most significant 32 bits after numBitsToDiscard */ +static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard) +{ + assert(numBitsToDiscard <= 32); + return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF; +} + +/** ZSTD_ldm_getTag() ; + * Given the hash, returns the most significant numTagBits bits + * after (32 + hbits) bits. + * + * If there are not enough bits remaining, return the last + * numTagBits bits. 
*/ +static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits) +{ + assert(numTagBits < 32 && hbits <= 32); + if (32 - hbits < numTagBits) { + return hash & (((U32)1 << numTagBits) - 1); + } else { + return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1); + } +} + +/** ZSTD_ldm_getBucket() : + * Returns a pointer to the start of the bucket associated with hash. */ +static ldmEntry_t* ZSTD_ldm_getBucket( + ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams) +{ + return ldmState->hashTable + (hash << ldmParams.bucketSizeLog); +} + +/** ZSTD_ldm_insertEntry() : + * Insert the entry with corresponding hash into the hash table */ +static void ZSTD_ldm_insertEntry(ldmState_t* ldmState, + size_t const hash, const ldmEntry_t entry, + ldmParams_t const ldmParams) +{ + BYTE* const bucketOffsets = ldmState->bucketOffsets; + *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + bucketOffsets[hash]) = entry; + bucketOffsets[hash]++; + bucketOffsets[hash] &= ((U32)1 << ldmParams.bucketSizeLog) - 1; +} + +/** ZSTD_ldm_makeEntryAndInsertByTag() : + * + * Gets the small hash, checksum, and tag from the rollingHash. + * + * If the tag matches (1 << ldmParams.hashRateLog)-1, then + * creates an ldmEntry from the offset, and inserts it into the hash table. + * + * hBits is the length of the small hash, which is the most significant hBits + * of rollingHash. The checksum is the next 32 most significant bits, followed + * by ldmParams.hashRateLog bits that make up the tag. 
*/ +static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState, + U64 const rollingHash, + U32 const hBits, + U32 const offset, + ldmParams_t const ldmParams) +{ + U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog); + U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1; + if (tag == tagMask) { + U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits); + U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); + ldmEntry_t entry; + entry.offset = offset; + entry.checksum = checksum; + ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams); + } +} + +/** ZSTD_ldm_countBackwardsMatch() : + * Returns the number of bytes that match backwards before pIn and pMatch. + * + * We count only bytes where pMatch >= pBase and pIn >= pAnchor. */ +static size_t ZSTD_ldm_countBackwardsMatch( + const BYTE* pIn, const BYTE* pAnchor, + const BYTE* pMatch, const BYTE* pBase) +{ + size_t matchLength = 0; + while (pIn > pAnchor && pMatch > pBase && pIn[-1] == pMatch[-1]) { + pIn--; + pMatch--; + matchLength++; + } + return matchLength; +} + +/** ZSTD_ldm_fillFastTables() : + * + * Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies. + * This is similar to ZSTD_loadDictionaryContent. + * + * The tables for the other strategies are filled within their + * block compressors. 
*/ +static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms, + void const* end) +{ + const BYTE* const iend = (const BYTE*)end; + + switch(ms->cParams.strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast); + break; + + case ZSTD_dfast: + ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast); + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + case ZSTD_btlazy2: + case ZSTD_btopt: + case ZSTD_btultra: + case ZSTD_btultra2: + break; + default: + assert(0); /* not possible : not a valid strategy id */ + } + + return 0; +} + +/** ZSTD_ldm_fillLdmHashTable() : + * + * Fills hashTable from (lastHashed + 1) to iend (non-inclusive). + * lastHash is the rolling hash that corresponds to lastHashed. + * + * Returns the rolling hash corresponding to position iend-1. */ +static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state, + U64 lastHash, const BYTE* lastHashed, + const BYTE* iend, const BYTE* base, + U32 hBits, ldmParams_t const ldmParams) +{ + U64 rollingHash = lastHash; + const BYTE* cur = lastHashed + 1; + + while (cur < iend) { + rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1], + cur[ldmParams.minMatchLength-1], + state->hashPower); + ZSTD_ldm_makeEntryAndInsertByTag(state, + rollingHash, hBits, + (U32)(cur - base), ldmParams); + ++cur; + } + return rollingHash; +} + +void ZSTD_ldm_fillHashTable( + ldmState_t* state, const BYTE* ip, + const BYTE* iend, ldmParams_t const* params) +{ + DEBUGLOG(5, "ZSTD_ldm_fillHashTable"); + if ((size_t)(iend - ip) >= params->minMatchLength) { + U64 startingHash = ZSTD_rollingHash_compute(ip, params->minMatchLength); + ZSTD_ldm_fillLdmHashTable( + state, startingHash, ip, iend - params->minMatchLength, state->window.base, + params->hashLog - params->bucketSizeLog, + *params); + } +} + + +/** ZSTD_ldm_limitTableUpdate() : + * + * Sets cctx->nextToUpdate to a position corresponding closer to anchor + * if it is far way + * (after a long match, only update tables a limited amount). 
*/ +static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor) +{ + U32 const current = (U32)(anchor - ms->window.base); + if (current > ms->nextToUpdate + 1024) { + ms->nextToUpdate = + current - MIN(512, current - ms->nextToUpdate - 1024); + } +} + +static size_t ZSTD_ldm_generateSequences_internal( + ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, + ldmParams_t const* params, void const* src, size_t srcSize) +{ + /* LDM parameters */ + int const extDict = ZSTD_window_hasExtDict(ldmState->window); + U32 const minMatchLength = params->minMatchLength; + U64 const hashPower = ldmState->hashPower; + U32 const hBits = params->hashLog - params->bucketSizeLog; + U32 const ldmBucketSize = 1U << params->bucketSizeLog; + U32 const hashRateLog = params->hashRateLog; + U32 const ldmTagMask = (1U << params->hashRateLog) - 1; + /* Prefix and extDict parameters */ + U32 const dictLimit = ldmState->window.dictLimit; + U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit; + BYTE const* const base = ldmState->window.base; + BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL; + BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL; + BYTE const* const dictEnd = extDict ? 
dictBase + dictLimit : NULL; + BYTE const* const lowPrefixPtr = base + dictLimit; + /* Input bounds */ + BYTE const* const istart = (BYTE const*)src; + BYTE const* const iend = istart + srcSize; + BYTE const* const ilimit = iend - MAX(minMatchLength, HASH_READ_SIZE); + /* Input positions */ + BYTE const* anchor = istart; + BYTE const* ip = istart; + /* Rolling hash */ + BYTE const* lastHashed = NULL; + U64 rollingHash = 0; + + while (ip <= ilimit) { + size_t mLength; + U32 const current = (U32)(ip - base); + size_t forwardMatchLength = 0, backwardMatchLength = 0; + ldmEntry_t* bestEntry = NULL; + if (ip != istart) { + rollingHash = ZSTD_rollingHash_rotate(rollingHash, lastHashed[0], + lastHashed[minMatchLength], + hashPower); + } else { + rollingHash = ZSTD_rollingHash_compute(ip, minMatchLength); + } + lastHashed = ip; + + /* Do not insert and do not look for a match */ + if (ZSTD_ldm_getTag(rollingHash, hBits, hashRateLog) != ldmTagMask) { + ip++; + continue; + } + + /* Get the best entry and compute the match lengths */ + { + ldmEntry_t* const bucket = + ZSTD_ldm_getBucket(ldmState, + ZSTD_ldm_getSmallHash(rollingHash, hBits), + *params); + ldmEntry_t* cur; + size_t bestMatchLength = 0; + U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits); + + for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) { + size_t curForwardMatchLength, curBackwardMatchLength, + curTotalMatchLength; + if (cur->checksum != checksum || cur->offset <= lowestIndex) { + continue; + } + if (extDict) { + BYTE const* const curMatchBase = + cur->offset < dictLimit ? dictBase : base; + BYTE const* const pMatch = curMatchBase + cur->offset; + BYTE const* const matchEnd = + cur->offset < dictLimit ? dictEnd : iend; + BYTE const* const lowMatchPtr = + cur->offset < dictLimit ? 
dictStart : lowPrefixPtr; + + curForwardMatchLength = ZSTD_count_2segments( + ip, pMatch, iend, + matchEnd, lowPrefixPtr); + if (curForwardMatchLength < minMatchLength) { + continue; + } + curBackwardMatchLength = + ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, + lowMatchPtr); + curTotalMatchLength = curForwardMatchLength + + curBackwardMatchLength; + } else { /* !extDict */ + BYTE const* const pMatch = base + cur->offset; + curForwardMatchLength = ZSTD_count(ip, pMatch, iend); + if (curForwardMatchLength < minMatchLength) { + continue; + } + curBackwardMatchLength = + ZSTD_ldm_countBackwardsMatch(ip, anchor, pMatch, + lowPrefixPtr); + curTotalMatchLength = curForwardMatchLength + + curBackwardMatchLength; + } + + if (curTotalMatchLength > bestMatchLength) { + bestMatchLength = curTotalMatchLength; + forwardMatchLength = curForwardMatchLength; + backwardMatchLength = curBackwardMatchLength; + bestEntry = cur; + } + } + } + + /* No match found -- continue searching */ + if (bestEntry == NULL) { + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, + hBits, current, + *params); + ip++; + continue; + } + + /* Match found */ + mLength = forwardMatchLength + backwardMatchLength; + ip -= backwardMatchLength; + + { + /* Store the sequence: + * ip = current - backwardMatchLength + * The match is at (bestEntry->offset - backwardMatchLength) + */ + U32 const matchIndex = bestEntry->offset; + U32 const offset = current - matchIndex; + rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size; + + /* Out of sequence storage */ + if (rawSeqStore->size == rawSeqStore->capacity) + return ERROR(dstSize_tooSmall); + seq->litLength = (U32)(ip - anchor); + seq->matchLength = (U32)mLength; + seq->offset = offset; + rawSeqStore->size++; + } + + /* Insert the current entry into the hash table */ + ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash, hBits, + (U32)(lastHashed - base), + *params); + + assert(ip + backwardMatchLength == lastHashed); + + /* Fill the hash table from 
lastHashed+1 to ip+mLength*/ + /* Heuristic: don't need to fill the entire table at end of block */ + if (ip + mLength <= ilimit) { + rollingHash = ZSTD_ldm_fillLdmHashTable( + ldmState, rollingHash, lastHashed, + ip + mLength, base, hBits, *params); + lastHashed = ip + mLength - 1; + } + ip += mLength; + anchor = ip; + } + return iend - anchor; +} + +/*! ZSTD_ldm_reduceTable() : + * reduce table indexes by `reducerValue` */ +static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size, + U32 const reducerValue) +{ + U32 u; + for (u = 0; u < size; u++) { + if (table[u].offset < reducerValue) table[u].offset = 0; + else table[u].offset -= reducerValue; + } +} + +size_t ZSTD_ldm_generateSequences( + ldmState_t* ldmState, rawSeqStore_t* sequences, + ldmParams_t const* params, void const* src, size_t srcSize) +{ + U32 const maxDist = 1U << params->windowLog; + BYTE const* const istart = (BYTE const*)src; + BYTE const* const iend = istart + srcSize; + size_t const kMaxChunkSize = 1 << 20; + size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0); + size_t chunk; + size_t leftoverSize = 0; + + assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize); + /* Check that ZSTD_window_update() has been called for this chunk prior + * to passing it to this function. + */ + assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize); + /* The input could be very large (in zstdmt), so it must be broken up into + * chunks to enforce the maximum distance and handle overflow correction. + */ + assert(sequences->pos <= sequences->size); + assert(sequences->size <= sequences->capacity); + for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) { + BYTE const* const chunkStart = istart + chunk * kMaxChunkSize; + size_t const remaining = (size_t)(iend - chunkStart); + BYTE const *const chunkEnd = + (remaining < kMaxChunkSize) ? 
iend : chunkStart + kMaxChunkSize; + size_t const chunkSize = chunkEnd - chunkStart; + size_t newLeftoverSize; + size_t const prevSize = sequences->size; + + assert(chunkStart < iend); + /* 1. Perform overflow correction if necessary. */ + if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) { + U32 const ldmHSize = 1U << params->hashLog; + U32 const correction = ZSTD_window_correctOverflow( + &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart); + ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction); + /* invalidate dictionaries on overflow correction */ + ldmState->loadedDictEnd = 0; + } + /* 2. We enforce the maximum offset allowed. + * + * kMaxChunkSize should be small enough that we don't lose too much of + * the window through early invalidation. + * TODO: * Test the chunk size. + * * Try invalidation after the sequence generation and test the + * the offset against maxDist directly. + * + * NOTE: Because of dictionaries + sequence splitting we MUST make sure + * that any offset used is valid at the END of the sequence, since it may + * be split into two sequences. This condition holds when using + * ZSTD_window_enforceMaxDist(), but if we move to checking offsets + * against maxDist directly, we'll have to carefully handle that case. + */ + ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL); + /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */ + newLeftoverSize = ZSTD_ldm_generateSequences_internal( + ldmState, sequences, params, chunkStart, chunkSize); + if (ZSTD_isError(newLeftoverSize)) + return newLeftoverSize; + /* 4. We add the leftover literals from previous iterations to the first + * newly generated sequence, or add the `newLeftoverSize` if none are + * generated. 
+ */ + /* Prepend the leftover literals from the last call */ + if (prevSize < sequences->size) { + sequences->seq[prevSize].litLength += (U32)leftoverSize; + leftoverSize = newLeftoverSize; + } else { + assert(newLeftoverSize == chunkSize); + leftoverSize += chunkSize; + } + } + return 0; +} + +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { + while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { + rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; + if (srcSize <= seq->litLength) { + /* Skip past srcSize literals */ + seq->litLength -= (U32)srcSize; + return; + } + srcSize -= seq->litLength; + seq->litLength = 0; + if (srcSize < seq->matchLength) { + /* Skip past the first srcSize of the match */ + seq->matchLength -= (U32)srcSize; + if (seq->matchLength < minMatch) { + /* The match is too short, omit it */ + if (rawSeqStore->pos + 1 < rawSeqStore->size) { + seq[1].litLength += seq[0].matchLength; + } + rawSeqStore->pos++; + } + return; + } + srcSize -= seq->matchLength; + seq->matchLength = 0; + rawSeqStore->pos++; + } +} + +/** + * If the sequence length is longer than remaining then the sequence is split + * between this block and the next. + * + * Returns the current sequence to handle, or if the rest of the block should + * be literals, it returns a sequence with offset == 0. + */ +static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore, + U32 const remaining, U32 const minMatch) +{ + rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos]; + assert(sequence.offset > 0); + /* Likely: No partial sequence */ + if (remaining >= sequence.litLength + sequence.matchLength) { + rawSeqStore->pos++; + return sequence; + } + /* Cut the sequence short (offset == 0 ==> rest is literals). 
*/ + if (remaining <= sequence.litLength) { + sequence.offset = 0; + } else if (remaining < sequence.litLength + sequence.matchLength) { + sequence.matchLength = remaining - sequence.litLength; + if (sequence.matchLength < minMatch) { + sequence.offset = 0; + } + } + /* Skip past `remaining` bytes for the future sequences. */ + ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch); + return sequence; +} + +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + unsigned const minMatch = cParams->minMatch; + ZSTD_blockCompressor const blockCompressor = + ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms)); + /* Input bounds */ + BYTE const* const istart = (BYTE const*)src; + BYTE const* const iend = istart + srcSize; + /* Input positions */ + BYTE const* ip = istart; + + DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize); + assert(rawSeqStore->pos <= rawSeqStore->size); + assert(rawSeqStore->size <= rawSeqStore->capacity); + /* Loop through each sequence and apply the block compressor to the lits */ + while (rawSeqStore->pos < rawSeqStore->size && ip < iend) { + /* maybeSplitSequence updates rawSeqStore->pos */ + rawSeq const sequence = maybeSplitSequence(rawSeqStore, + (U32)(iend - ip), minMatch); + int i; + /* End signal */ + if (sequence.offset == 0) + break; + + assert(ip + sequence.litLength + sequence.matchLength <= iend); + + /* Fill tables for block compressor */ + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, ip); + /* Run the block compressor */ + DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength); + { + size_t const newLitLength = + blockCompressor(ms, seqStore, rep, ip, sequence.litLength); + ip += sequence.litLength; + /* Update the repcodes */ + for (i = 
ZSTD_REP_NUM - 1; i > 0; i--) + rep[i] = rep[i-1]; + rep[0] = sequence.offset; + /* Store the sequence */ + ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, + sequence.offset + ZSTD_REP_MOVE, + sequence.matchLength - MINMATCH); + ip += sequence.matchLength; + } + } + /* Fill the tables for the block compressor */ + ZSTD_ldm_limitTableUpdate(ms, ip); + ZSTD_ldm_fillFastTables(ms, ip); + /* Compress the last literals */ + return blockCompressor(ms, seqStore, rep, ip, iend - ip); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_ldm.h b/src/borg/algorithms/zstd/lib/compress/zstd_ldm.h new file mode 100644 index 0000000000..229ea05a9e --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_ldm.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_LDM_H +#define ZSTD_LDM_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "zstd_compress_internal.h" /* ldmParams_t, U32 */ +#include "../zstd.h" /* ZSTD_CCtx, size_t */ + +/*-************************************* +* Long distance matching +***************************************/ + +#define ZSTD_LDM_DEFAULT_WINDOW_LOG ZSTD_WINDOWLOG_LIMIT_DEFAULT + +void ZSTD_ldm_fillHashTable( + ldmState_t* state, const BYTE* ip, + const BYTE* iend, ldmParams_t const* params); + +/** + * ZSTD_ldm_generateSequences(): + * + * Generates the sequences using the long distance match finder. + * Generates long range matching sequences in `sequences`, which parse a prefix + * of the source. `sequences` must be large enough to store every sequence, + * which can be checked with `ZSTD_ldm_getMaxNbSeq()`. + * @returns 0 or an error code. 
+ * + * NOTE: The user must have called ZSTD_window_update() for all of the input + * they have, even if they pass it to ZSTD_ldm_generateSequences() in chunks. + * NOTE: This function returns an error if it runs out of space to store + * sequences. + */ +size_t ZSTD_ldm_generateSequences( + ldmState_t* ldms, rawSeqStore_t* sequences, + ldmParams_t const* params, void const* src, size_t srcSize); + +/** + * ZSTD_ldm_blockCompress(): + * + * Compresses a block using the predefined sequences, along with a secondary + * block compressor. The literals section of every sequence is passed to the + * secondary block compressor, and those sequences are interspersed with the + * predefined sequences. Returns the length of the last literals. + * Updates `rawSeqStore.pos` to indicate how many sequences have been consumed. + * `rawSeqStore.seq` may also be updated to split the last sequence between two + * blocks. + * @return The length of the last literals. + * + * NOTE: The source must be at most the maximum block size, but the predefined + * sequences can be any size, and may be longer than the block. In the case that + * they are longer than the block, the last sequences may need to be split into + * two. We handle that case correctly, and update `rawSeqStore` appropriately. + * NOTE: This function does not return any errors. + */ +size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +/** + * ZSTD_ldm_skipSequences(): + * + * Skip past `srcSize` bytes worth of sequences in `rawSeqStore`. + * Avoids emitting matches less than `minMatch` bytes. + * Must be called for data with is not passed to ZSTD_ldm_blockCompress(). + */ +void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, + U32 const minMatch); + + +/** ZSTD_ldm_getTableSize() : + * Estimate the space needed for long distance matching tables or 0 if LDM is + * disabled. 
+ */ +size_t ZSTD_ldm_getTableSize(ldmParams_t params); + +/** ZSTD_ldm_getSeqSpace() : + * Return an upper bound on the number of sequences that can be produced by + * the long distance matcher, or 0 if LDM is disabled. + */ +size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize); + +/** ZSTD_ldm_adjustParameters() : + * If the params->hashRateLog is not set, set it to its default value based on + * windowLog and params->hashLog. + * + * Ensures that params->bucketSizeLog is <= params->hashLog (setting it to + * params->hashLog if it is not). + * + * Ensures that the minMatchLength >= targetLength during optimal parsing. + */ +void ZSTD_ldm_adjustParameters(ldmParams_t* params, + ZSTD_compressionParameters const* cParams); + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_FAST_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_opt.c b/src/borg/algorithms/zstd/lib/compress/zstd_opt.c new file mode 100644 index 0000000000..36fff050cf --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_opt.c @@ -0,0 +1,1200 @@ +/* + * Copyright (c) 2016-2020, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#include "zstd_compress_internal.h" +#include "hist.h" +#include "zstd_opt.h" + + +#define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ +#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ +#define ZSTD_MAX_PRICE (1<<30) + +#define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ + + +/*-************************************* +* Price functions for optimal parser +***************************************/ + +#if 0 /* approximation at bit level */ +# define BITCOST_ACCURACY 0 +# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) +# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat)) +#elif 0 /* fractional bit accuracy */ +# define BITCOST_ACCURACY 8 +# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) +# define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) +#else /* opt==approx, ultra==accurate */ +# define BITCOST_ACCURACY 8 +# define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) +# define WEIGHT(stat,opt) (opt ? 
ZSTD_fracWeight(stat) : ZSTD_bitWeight(stat)) +#endif + +MEM_STATIC U32 ZSTD_bitWeight(U32 stat) +{ + return (ZSTD_highbit32(stat+1) * BITCOST_MULTIPLIER); +} + +MEM_STATIC U32 ZSTD_fracWeight(U32 rawStat) +{ + U32 const stat = rawStat + 1; + U32 const hb = ZSTD_highbit32(stat); + U32 const BWeight = hb * BITCOST_MULTIPLIER; + U32 const FWeight = (stat << BITCOST_ACCURACY) >> hb; + U32 const weight = BWeight + FWeight; + assert(hb + BITCOST_ACCURACY < 31); + return weight; +} + +#if (DEBUGLEVEL>=2) +/* debugging function, + * @return price in bytes as fractional value + * for debug messages only */ +MEM_STATIC double ZSTD_fCost(U32 price) +{ + return (double)price / (BITCOST_MULTIPLIER*8); +} +#endif + +static int ZSTD_compressedLiterals(optState_t const* const optPtr) +{ + return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed; +} + +static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) +{ + if (ZSTD_compressedLiterals(optPtr)) + optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel); + optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel); + optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel); + optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel); +} + + +/* ZSTD_downscaleStat() : + * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus) + * return the resulting sum of elements */ +static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus) +{ + U32 s, sum=0; + DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1); + assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31); + for (s=0; s> (ZSTD_FREQ_DIV+malus)); + sum += table[s]; + } + return sum; +} + +/* ZSTD_rescaleFreqs() : + * if first block (detected by optPtr->litLengthSum == 0) : init statistics + * take hints from dictionary if there is one + * or init from zero, using src for literals stats, or flat 1 for match symbols + * otherwise downscale existing stats, to be used as seed 
for next block. + */ +static void +ZSTD_rescaleFreqs(optState_t* const optPtr, + const BYTE* const src, size_t const srcSize, + int const optLevel) +{ + int const compressedLiterals = ZSTD_compressedLiterals(optPtr); + DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize); + optPtr->priceType = zop_dynamic; + + if (optPtr->litLengthSum == 0) { /* first block : init */ + if (srcSize <= ZSTD_PREDEF_THRESHOLD) { /* heuristic */ + DEBUGLOG(5, "(srcSize <= ZSTD_PREDEF_THRESHOLD) => zop_predef"); + optPtr->priceType = zop_predef; + } + + assert(optPtr->symbolCosts != NULL); + if (optPtr->symbolCosts->huf.repeatMode == HUF_repeat_valid) { + /* huffman table presumed generated by dictionary */ + optPtr->priceType = zop_dynamic; + + if (compressedLiterals) { + unsigned lit; + assert(optPtr->litFreq != NULL); + optPtr->litSum = 0; + for (lit=0; lit<=MaxLit; lit++) { + U32 const scaleLog = 11; /* scale to 2K */ + U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit); + assert(bitCost <= scaleLog); + optPtr->litFreq[lit] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; + optPtr->litSum += optPtr->litFreq[lit]; + } } + + { unsigned ll; + FSE_CState_t llstate; + FSE_initCState(&llstate, optPtr->symbolCosts->fse.litlengthCTable); + optPtr->litLengthSum = 0; + for (ll=0; ll<=MaxLL; ll++) { + U32 const scaleLog = 10; /* scale to 1K */ + U32 const bitCost = FSE_getMaxNbBits(llstate.symbolTT, ll); + assert(bitCost < scaleLog); + optPtr->litLengthFreq[ll] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; + optPtr->litLengthSum += optPtr->litLengthFreq[ll]; + } } + + { unsigned ml; + FSE_CState_t mlstate; + FSE_initCState(&mlstate, optPtr->symbolCosts->fse.matchlengthCTable); + optPtr->matchLengthSum = 0; + for (ml=0; ml<=MaxML; ml++) { + U32 const scaleLog = 10; + U32 const bitCost = FSE_getMaxNbBits(mlstate.symbolTT, ml); + assert(bitCost < scaleLog); + optPtr->matchLengthFreq[ml] = bitCost ? 
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; + optPtr->matchLengthSum += optPtr->matchLengthFreq[ml]; + } } + + { unsigned of; + FSE_CState_t ofstate; + FSE_initCState(&ofstate, optPtr->symbolCosts->fse.offcodeCTable); + optPtr->offCodeSum = 0; + for (of=0; of<=MaxOff; of++) { + U32 const scaleLog = 10; + U32 const bitCost = FSE_getMaxNbBits(ofstate.symbolTT, of); + assert(bitCost < scaleLog); + optPtr->offCodeFreq[of] = bitCost ? 1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; + optPtr->offCodeSum += optPtr->offCodeFreq[of]; + } } + + } else { /* not a dictionary */ + + assert(optPtr->litFreq != NULL); + if (compressedLiterals) { + unsigned lit = MaxLit; + HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ + optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); + } + + { unsigned ll; + for (ll=0; ll<=MaxLL; ll++) + optPtr->litLengthFreq[ll] = 1; + } + optPtr->litLengthSum = MaxLL+1; + + { unsigned ml; + for (ml=0; ml<=MaxML; ml++) + optPtr->matchLengthFreq[ml] = 1; + } + optPtr->matchLengthSum = MaxML+1; + + { unsigned of; + for (of=0; of<=MaxOff; of++) + optPtr->offCodeFreq[of] = 1; + } + optPtr->offCodeSum = MaxOff+1; + + } + + } else { /* new block : re-use previous statistics, scaled down */ + + if (compressedLiterals) + optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); + optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0); + optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0); + optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0); + } + + ZSTD_setBasePrices(optPtr, optLevel); +} + +/* ZSTD_rawLiteralsCost() : + * price of literals (only) in specified segment (which length can be 0). 
+ * does not include price of literalLength symbol */ +static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, + const optState_t* const optPtr, + int optLevel) +{ + if (litLength == 0) return 0; + + if (!ZSTD_compressedLiterals(optPtr)) + return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */ + + if (optPtr->priceType == zop_predef) + return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */ + + /* dynamic statistics */ + { U32 price = litLength * optPtr->litSumBasePrice; + U32 u; + for (u=0; u < litLength; u++) { + assert(WEIGHT(optPtr->litFreq[literals[u]], optLevel) <= optPtr->litSumBasePrice); /* literal cost should never be negative */ + price -= WEIGHT(optPtr->litFreq[literals[u]], optLevel); + } + return price; + } +} + +/* ZSTD_litLengthPrice() : + * cost of literalLength symbol */ +static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel) +{ + if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); + + /* dynamic statistics */ + { U32 const llCode = ZSTD_LLcode(litLength); + return (LL_bits[llCode] * BITCOST_MULTIPLIER) + + optPtr->litLengthSumBasePrice + - WEIGHT(optPtr->litLengthFreq[llCode], optLevel); + } +} + +/* ZSTD_getMatchPrice() : + * Provides the cost of the match part (offset + matchLength) of a sequence + * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. 
+ * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ +FORCE_INLINE_TEMPLATE U32 +ZSTD_getMatchPrice(U32 const offset, + U32 const matchLength, + const optState_t* const optPtr, + int const optLevel) +{ + U32 price; + U32 const offCode = ZSTD_highbit32(offset+1); + U32 const mlBase = matchLength - MINMATCH; + assert(matchLength >= MINMATCH); + + if (optPtr->priceType == zop_predef) /* fixed scheme, do not use statistics */ + return WEIGHT(mlBase, optLevel) + ((16 + offCode) * BITCOST_MULTIPLIER); + + /* dynamic statistics */ + price = (offCode * BITCOST_MULTIPLIER) + (optPtr->offCodeSumBasePrice - WEIGHT(optPtr->offCodeFreq[offCode], optLevel)); + if ((optLevel<2) /*static*/ && offCode >= 20) + price += (offCode-19)*2 * BITCOST_MULTIPLIER; /* handicap for long distance offsets, favor decompression speed */ + + /* match Length */ + { U32 const mlCode = ZSTD_MLcode(mlBase); + price += (ML_bits[mlCode] * BITCOST_MULTIPLIER) + (optPtr->matchLengthSumBasePrice - WEIGHT(optPtr->matchLengthFreq[mlCode], optLevel)); + } + + price += BITCOST_MULTIPLIER / 5; /* heuristic : make matches a bit more costly to favor less sequences -> faster decompression speed */ + + DEBUGLOG(8, "ZSTD_getMatchPrice(ml:%u) = %u", matchLength, price); + return price; +} + +/* ZSTD_updateStats() : + * assumption : literals + litLengtn <= iend */ +static void ZSTD_updateStats(optState_t* const optPtr, + U32 litLength, const BYTE* literals, + U32 offsetCode, U32 matchLength) +{ + /* literals */ + if (ZSTD_compressedLiterals(optPtr)) { + U32 u; + for (u=0; u < litLength; u++) + optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; + optPtr->litSum += litLength*ZSTD_LITFREQ_ADD; + } + + /* literal Length */ + { U32 const llCode = ZSTD_LLcode(litLength); + optPtr->litLengthFreq[llCode]++; + optPtr->litLengthSum++; + } + + /* match offset code (0-2=>repCode; 3+=>offset+2) */ + { U32 const offCode = ZSTD_highbit32(offsetCode+1); + assert(offCode <= MaxOff); + 
optPtr->offCodeFreq[offCode]++; + optPtr->offCodeSum++; + } + + /* match Length */ + { U32 const mlBase = matchLength - MINMATCH; + U32 const mlCode = ZSTD_MLcode(mlBase); + optPtr->matchLengthFreq[mlCode]++; + optPtr->matchLengthSum++; + } +} + + +/* ZSTD_readMINMATCH() : + * function safe only for comparisons + * assumption : memPtr must be at least 4 bytes before end of buffer */ +MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) +{ + switch (length) + { + default : + case 4 : return MEM_read32(memPtr); + case 3 : if (MEM_isLittleEndian()) + return MEM_read32(memPtr)<<8; + else + return MEM_read32(memPtr)>>8; + } +} + + +/* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. not within extDict) */ +static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip) +{ + U32* const hashTable3 = ms->hashTable3; + U32 const hashLog3 = ms->hashLog3; + const BYTE* const base = ms->window.base; + U32 idx = *nextToUpdate3; + U32 const target = (U32)(ip - base); + size_t const hash3 = ZSTD_hash3Ptr(ip, hashLog3); + assert(hashLog3 > 0); + + while(idx < target) { + hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; + idx++; + } + + *nextToUpdate3 = target; + return hashTable3[hash3]; +} + + +/*-************************************* +* Binary Tree search +***************************************/ +/** ZSTD_insertBt1() : add one or multiple positions to tree. + * ip : assumed <= iend-8 . 
+ * @return : nb of positions added */ +static U32 ZSTD_insertBt1( + ZSTD_matchState_t* ms, + const BYTE* const ip, const BYTE* const iend, + U32 const mls, const int extDict) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32* const hashTable = ms->hashTable; + U32 const hashLog = cParams->hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = ms->window.base; + const BYTE* const dictBase = ms->window.dictBase; + const U32 dictLimit = ms->window.dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 0 : current - btMask; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = ms->window.lowLimit; + U32 matchEndIdx = current+8+1; + size_t bestLength = 8; + U32 nbCompares = 1U << cParams->searchLog; +#ifdef ZSTD_C_PREDICT + U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); + U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); + predictedSmall += (predictedSmall>0); + predictedLarge += (predictedLarge>0); +#endif /* ZSTD_C_PREDICT */ + + DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current); + + assert(ip <= iend-8); /* required for h calculation */ + hashTable[h] = current; /* Update Hash Table */ + + assert(windowLow > 0); + while (nbCompares-- && (matchIndex >= windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + assert(matchIndex < current); + +#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ + const U32* 
predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ + if (matchIndex == predictedSmall) { + /* no need to check length, result known */ + *smallerPtr = matchIndex; + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + predictedSmall = predictPtr[1] + (predictPtr[1]>0); + continue; + } + if (matchIndex == predictedLarge) { + *largerPtr = matchIndex; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + predictedLarge = predictPtr[0] + (predictPtr[0]>0); + continue; + } +#endif + + if (!extDict || (matchIndex+matchLength >= dictLimit)) { + assert(matchIndex+matchLength >= dictLimit); /* might be wrong if actually extDict */ + match = base + matchIndex; + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend); + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + } + + if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */ + } + + if (match[matchLength] < ip[matchLength]) { /* necessarily within buffer */ + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= 
btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + smallerPtr = nextPtr+1; /* new "candidate" => larger than match, which was smaller than target */ + matchIndex = nextPtr[1]; /* new matchIndex, larger than previous and closer to current */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop searching */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + { U32 positions = 0; + if (bestLength > 384) positions = MIN(192, (U32)(bestLength - 384)); /* speed optimization */ + assert(matchEndIdx > current + 8); + return MAX(positions, matchEndIdx - (current + 8)); + } +} + +FORCE_INLINE_TEMPLATE +void ZSTD_updateTree_internal( + ZSTD_matchState_t* ms, + const BYTE* const ip, const BYTE* const iend, + const U32 mls, const ZSTD_dictMode_e dictMode) +{ + const BYTE* const base = ms->window.base; + U32 const target = (U32)(ip - base); + U32 idx = ms->nextToUpdate; + DEBUGLOG(6, "ZSTD_updateTree_internal, from %u to %u (dictMode:%u)", + idx, target, dictMode); + + while(idx < target) { + U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict); + assert(idx < (U32)(idx + forward)); + idx += forward; + } + assert((size_t)(ip - base) <= (size_t)(U32)(-1)); + assert((size_t)(iend - base) <= (size_t)(U32)(-1)); + ms->nextToUpdate = target; +} + +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) { + ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); +} + +FORCE_INLINE_TEMPLATE +U32 ZSTD_insertBtAndGetAllMatches ( + ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ + ZSTD_matchState_t* ms, + U32* nextToUpdate3, + const BYTE* const ip, const BYTE* const iLimit, const ZSTD_dictMode_e dictMode, + const U32 rep[ZSTD_REP_NUM], + U32 const ll0, /* tells 
if associated literal length is 0 or not. This value must be 0 or 1 */ + const U32 lengthToBeat, + U32 const mls /* template */) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); + const BYTE* const base = ms->window.base; + U32 const current = (U32)(ip-base); + U32 const hashLog = cParams->hashLog; + U32 const minMatch = (mls==3) ? 3 : 4; + U32* const hashTable = ms->hashTable; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32 matchIndex = hashTable[h]; + U32* const bt = ms->chainTable; + U32 const btLog = cParams->chainLog - 1; + U32 const btMask= (1U << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const dictBase = ms->window.dictBase; + U32 const dictLimit = ms->window.dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + U32 const btLow = (btMask >= current) ? 0 : current - btMask; + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog); + U32 const matchLow = windowLow ? windowLow : 1; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8+1; /* farthest referenced position of any match => detects repetitive patterns */ + U32 dummy32; /* to be nullified at the end */ + U32 mnum = 0; + U32 nbCompares = 1U << cParams->searchLog; + + const ZSTD_matchState_t* dms = dictMode == ZSTD_dictMatchState ? ms->dictMatchState : NULL; + const ZSTD_compressionParameters* const dmsCParams = + dictMode == ZSTD_dictMatchState ? &dms->cParams : NULL; + const BYTE* const dmsBase = dictMode == ZSTD_dictMatchState ? dms->window.base : NULL; + const BYTE* const dmsEnd = dictMode == ZSTD_dictMatchState ? dms->window.nextSrc : NULL; + U32 const dmsHighLimit = dictMode == ZSTD_dictMatchState ? (U32)(dmsEnd - dmsBase) : 0; + U32 const dmsLowLimit = dictMode == ZSTD_dictMatchState ? 
dms->window.lowLimit : 0; + U32 const dmsIndexDelta = dictMode == ZSTD_dictMatchState ? windowLow - dmsHighLimit : 0; + U32 const dmsHashLog = dictMode == ZSTD_dictMatchState ? dmsCParams->hashLog : hashLog; + U32 const dmsBtLog = dictMode == ZSTD_dictMatchState ? dmsCParams->chainLog - 1 : btLog; + U32 const dmsBtMask = dictMode == ZSTD_dictMatchState ? (1U << dmsBtLog) - 1 : 0; + U32 const dmsBtLow = dictMode == ZSTD_dictMatchState && dmsBtMask < dmsHighLimit - dmsLowLimit ? dmsHighLimit - dmsBtMask : dmsLowLimit; + + size_t bestLength = lengthToBeat-1; + DEBUGLOG(8, "ZSTD_insertBtAndGetAllMatches: current=%u", current); + + /* check repCode */ + assert(ll0 <= 1); /* necessarily 1 or 0 */ + { U32 const lastR = ZSTD_REP_NUM + ll0; + U32 repCode; + for (repCode = ll0; repCode < lastR; repCode++) { + U32 const repOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; + U32 const repIndex = current - repOffset; + U32 repLen = 0; + assert(current >= dictLimit); + if (repOffset-1 /* intentional overflow, discards 0 and -1 */ < current-dictLimit) { /* equivalent to `current > repIndex >= dictLimit` */ + /* We must validate the repcode offset because when we're using a dictionary the + * valid offset range shrinks when the dictionary goes out of bounds. + */ + if ((repIndex >= windowLow) & (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(ip - repOffset, minMatch))) { + repLen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repOffset, iLimit) + minMatch; + } + } else { /* repIndex < dictLimit || repIndex >= current */ + const BYTE* const repMatch = dictMode == ZSTD_dictMatchState ? 
+ dmsBase + repIndex - dmsIndexDelta : + dictBase + repIndex; + assert(current >= windowLow); + if ( dictMode == ZSTD_extDict + && ( ((repOffset-1) /*intentional overflow*/ < current - windowLow) /* equivalent to `current > repIndex >= windowLow` */ + & (((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */) + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dictEnd, prefixStart) + minMatch; + } + if (dictMode == ZSTD_dictMatchState + && ( ((repOffset-1) /*intentional overflow*/ < current - (dmsLowLimit + dmsIndexDelta)) /* equivalent to `current > repIndex >= dmsLowLimit` */ + & ((U32)((dictLimit-1) - repIndex) >= 3) ) /* intentional overflow : do not test positions overlapping 2 memory segments */ + && (ZSTD_readMINMATCH(ip, minMatch) == ZSTD_readMINMATCH(repMatch, minMatch)) ) { + repLen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iLimit, dmsEnd, prefixStart) + minMatch; + } } + /* save longer solution */ + if (repLen > bestLength) { + DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", + repCode, ll0, repOffset, repLen); + bestLength = repLen; + matches[mnum].off = repCode - ll0; + matches[mnum].len = (U32)repLen; + mnum++; + if ( (repLen > sufficient_len) + | (ip+repLen == iLimit) ) { /* best possible */ + return mnum; + } } } } + + /* HC3 match finder */ + if ((mls == 3) /*static*/ && (bestLength < mls)) { + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3(ms, nextToUpdate3, ip); + if ((matchIndex3 >= matchLow) + & (current - matchIndex3 < (1<<18)) /*heuristic : longer distance likely too expensive*/ ) { + size_t mlen; + if ((dictMode == ZSTD_noDict) /*static*/ || (dictMode == ZSTD_dictMatchState) /*static*/ || (matchIndex3 >= dictLimit)) { + const BYTE* const match = base + matchIndex3; + mlen = ZSTD_count(ip, match, iLimit); + } else { + const 
BYTE* const match = dictBase + matchIndex3; + mlen = ZSTD_count_2segments(ip, match, iLimit, dictEnd, prefixStart); + } + + /* save best solution */ + if (mlen >= mls /* == 3 > bestLength */) { + DEBUGLOG(8, "found small match with hlog3, of length %u", + (U32)mlen); + bestLength = mlen; + assert(current > matchIndex3); + assert(mnum==0); /* no prior solution */ + matches[0].off = (current - matchIndex3) + ZSTD_REP_MOVE; + matches[0].len = (U32)mlen; + mnum = 1; + if ( (mlen > sufficient_len) | + (ip+mlen == iLimit) ) { /* best possible length */ + ms->nextToUpdate = current+1; /* skip insertion */ + return 1; + } } } + /* no dictMatchState lookup: dicts don't have a populated HC3 table */ + } + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex >= matchLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + const BYTE* match; + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + assert(current > matchIndex); + + if ((dictMode == ZSTD_noDict) || (dictMode == ZSTD_dictMatchState) || (matchIndex+matchLength >= dictLimit)) { + assert(matchIndex+matchLength >= dictLimit); /* ensure the condition is correct when !extDict */ + match = base + matchIndex; + if (matchIndex >= dictLimit) assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ + matchLength += ZSTD_count(ip+matchLength, match+matchLength, iLimit); + } else { + match = dictBase + matchIndex; + assert(memcmp(match, ip, matchLength) == 0); /* ensure early section of match is equal as expected */ + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* prepare for match[matchLength] read */ + } + + if (matchLength > bestLength) { + DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", + (U32)matchLength, current - matchIndex, 
current - matchIndex + ZSTD_REP_MOVE); + assert(matchEndIdx > matchIndex); + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; + matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].len = (U32)matchLength; + mnum++; + if ( (matchLength > ZSTD_OPT_NUM) + | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { + if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */ + break; /* drop, to preserve bt consistency (miss a little bit of compression) */ + } + } + + if (match[matchLength] < ip[matchLength]) { + /* match smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new candidate => larger than match, which was smaller than current */ + matchIndex = nextPtr[1]; /* new matchIndex, larger than previous, closer to current */ + } else { + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + if (dictMode == ZSTD_dictMatchState && nbCompares) { + size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); + U32 dictMatchIndex = dms->hashTable[dmsH]; + const U32* const dmsBt = dms->chainTable; + commonLengthSmaller = commonLengthLarger = 0; + while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) { + const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match = dmsBase + dictMatchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, 
match+matchLength, iLimit, dmsEnd, prefixStart); + if (dictMatchIndex+matchLength >= dmsHighLimit) + match = base + dictMatchIndex + dmsIndexDelta; /* to prepare for next usage of match[matchLength] */ + + if (matchLength > bestLength) { + matchIndex = dictMatchIndex + dmsIndexDelta; + DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", + (U32)matchLength, current - matchIndex, current - matchIndex + ZSTD_REP_MOVE); + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; + matches[mnum].off = (current - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].len = (U32)matchLength; + mnum++; + if ( (matchLength > ZSTD_OPT_NUM) + | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + } + + if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ + if (match[matchLength] < ip[matchLength]) { + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + dictMatchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + commonLengthLarger = matchLength; + dictMatchIndex = nextPtr[0]; + } + } + } + + assert(matchEndIdx > current+8); + ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ + return mnum; +} + + +FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( + ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */ + ZSTD_matchState_t* ms, + U32* nextToUpdate3, + const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode, + const U32 rep[ZSTD_REP_NUM], + U32 const ll0, + U32 const lengthToBeat) +{ + const ZSTD_compressionParameters* const cParams = &ms->cParams; + U32 const matchLengthSearch = cParams->minMatch; + DEBUGLOG(8, "ZSTD_BtGetAllMatches"); + if (ip < ms->window.base + 
ms->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode); + switch(matchLengthSearch) + { + case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3); + default : + case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4); + case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5); + case 7 : + case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6); + } +} + + +/*-******************************* +* Optimal parser +*********************************/ + + +static U32 ZSTD_totalLen(ZSTD_optimal_t sol) +{ + return sol.litlen + sol.mlen; +} + +#if 0 /* debug */ + +static void +listStats(const U32* table, int lastEltID) +{ + int const nbElts = lastEltID + 1; + int enb; + for (enb=0; enb < nbElts; enb++) { + (void)table; + /* RAWLOG(2, "%3i:%3i, ", enb, table[enb]); */ + RAWLOG(2, "%4i,", table[enb]); + } + RAWLOG(2, " \n"); +} + +#endif + +FORCE_INLINE_TEMPLATE size_t +ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, + seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, + const int optLevel, + const ZSTD_dictMode_e dictMode) +{ + optState_t* const optStatePtr = &ms->opt; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ms->window.base; + const BYTE* const prefixStart = base + ms->window.dictLimit; + const ZSTD_compressionParameters* const cParams = &ms->cParams; + + U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); + U32 const minMatch = (cParams->minMatch == 3) ? 
3 : 4; + U32 nextToUpdate3 = ms->nextToUpdate; + + ZSTD_optimal_t* const opt = optStatePtr->priceTable; + ZSTD_match_t* const matches = optStatePtr->matchTable; + ZSTD_optimal_t lastSequence; + + /* init */ + DEBUGLOG(5, "ZSTD_compressBlock_opt_generic: current=%u, prefix=%u, nextToUpdate=%u", + (U32)(ip - base), ms->window.dictLimit, ms->nextToUpdate); + assert(optLevel <= 2); + ZSTD_rescaleFreqs(optStatePtr, (const BYTE*)src, srcSize, optLevel); + ip += (ip==prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, last_pos = 0; + + /* find first match */ + { U32 const litlen = (U32)(ip - anchor); + U32 const ll0 = !litlen; + U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch); + if (!nbMatches) { ip++; continue; } + + /* initialize opt[0] */ + { U32 i ; for (i=0; i immediate encoding */ + { U32 const maxML = matches[nbMatches-1].len; + U32 const maxOffset = matches[nbMatches-1].off; + DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", + nbMatches, maxML, maxOffset, (U32)(ip-prefixStart)); + + if (maxML > sufficient_len) { + lastSequence.litlen = litlen; + lastSequence.mlen = maxML; + lastSequence.off = maxOffset; + DEBUGLOG(6, "large match (%u>%u), immediate encoding", + maxML, sufficient_len); + cur = 0; + last_pos = ZSTD_totalLen(lastSequence); + goto _shortestPath; + } } + + /* set prices for first matches starting position == 0 */ + { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); + U32 pos; + U32 matchNb; + for (pos = 1; pos < minMatch; pos++) { + opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ + } + for (matchNb = 0; matchNb < nbMatches; matchNb++) { + U32 const offset = matches[matchNb].off; + U32 const end = matches[matchNb].len; + for ( ; pos <= end ; pos++ ) { + U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel); + U32 
const sequencePrice = literalsPrice + matchPrice; + DEBUGLOG(7, "rPos:%u => set initial price : %.2f", + pos, ZSTD_fCost(sequencePrice)); + opt[pos].mlen = pos; + opt[pos].off = offset; + opt[pos].litlen = litlen; + opt[pos].price = sequencePrice; + } } + last_pos = pos-1; + } + } + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + const BYTE* const inr = ip + cur; + assert(cur < ZSTD_OPT_NUM); + DEBUGLOG(7, "cPos:%zi==rPos:%u", inr-istart, cur) + + /* Fix current position with one literal if cheaper */ + { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1; + int const price = opt[cur-1].price + + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) + + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) + - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); + assert(price < 1000000000); /* overflow check */ + if (price <= opt[cur].price) { + DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", + inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), litlen, + opt[cur-1].rep[0], opt[cur-1].rep[1], opt[cur-1].rep[2]); + opt[cur].mlen = 0; + opt[cur].off = 0; + opt[cur].litlen = litlen; + opt[cur].price = price; + } else { + DEBUGLOG(7, "cPos:%zi==rPos:%u : literal would cost more (%.2f>%.2f) (hist:%u,%u,%u)", + inr-istart, cur, ZSTD_fCost(price), ZSTD_fCost(opt[cur].price), + opt[cur].rep[0], opt[cur].rep[1], opt[cur].rep[2]); + } + } + + /* Set the repcodes of the current position. We must do it here + * because we rely on the repcodes of the 2nd to last sequence being + * correct to set the next chunks repcodes during the backward + * traversal. 
+ */ + ZSTD_STATIC_ASSERT(sizeof(opt[cur].rep) == sizeof(repcodes_t)); + assert(cur >= opt[cur].mlen); + if (opt[cur].mlen != 0) { + U32 const prev = cur - opt[cur].mlen; + repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); + memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); + } else { + memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t)); + } + + /* last match must start at a minimum distance of 8 from oend */ + if (inr > ilimit) continue; + + if (cur == last_pos) break; + + if ( (optLevel==0) /*static_test*/ + && (opt[cur+1].price <= opt[cur].price + (BITCOST_MULTIPLIER/2)) ) { + DEBUGLOG(7, "move to next rPos:%u : price is <=", cur+1); + continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ + } + + { U32 const ll0 = (opt[cur].mlen != 0); + U32 const litlen = (opt[cur].mlen == 0) ? opt[cur].litlen : 0; + U32 const previousPrice = opt[cur].price; + U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); + U32 const nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch); + U32 matchNb; + if (!nbMatches) { + DEBUGLOG(7, "rPos:%u : no match found", cur); + continue; + } + + { U32 const maxML = matches[nbMatches-1].len; + DEBUGLOG(7, "cPos:%zi==rPos:%u, found %u matches, of maxLength=%u", + inr-istart, cur, nbMatches, maxML); + + if ( (maxML > sufficient_len) + || (cur + maxML >= ZSTD_OPT_NUM) ) { + lastSequence.mlen = maxML; + lastSequence.off = matches[nbMatches-1].off; + lastSequence.litlen = litlen; + cur -= (opt[cur].mlen==0) ? 
opt[cur].litlen : 0; /* last sequence is actually only literals, fix cur to last match - note : may underflow, in which case, it's first sequence, and it's okay */ + last_pos = cur + ZSTD_totalLen(lastSequence); + if (cur > ZSTD_OPT_NUM) cur = 0; /* underflow => first match */ + goto _shortestPath; + } } + + /* set prices using matches found at position == cur */ + for (matchNb = 0; matchNb < nbMatches; matchNb++) { + U32 const offset = matches[matchNb].off; + U32 const lastML = matches[matchNb].len; + U32 const startML = (matchNb>0) ? matches[matchNb-1].len+1 : minMatch; + U32 mlen; + + DEBUGLOG(7, "testing match %u => offCode=%4u, mlen=%2u, llen=%2u", + matchNb, matches[matchNb].off, lastML, litlen); + + for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ + U32 const pos = cur + mlen; + int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + + if ((pos > last_pos) || (price < opt[pos].price)) { + DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", + pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); + while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } /* fill empty positions */ + opt[pos].mlen = mlen; + opt[pos].off = offset; + opt[pos].litlen = litlen; + opt[pos].price = price; + } else { + DEBUGLOG(7, "rPos:%u (ml=%2u) => new price is worse (%.2f>=%.2f)", + pos, mlen, ZSTD_fCost(price), ZSTD_fCost(opt[pos].price)); + if (optLevel==0) break; /* early update abort; gets ~+10% speed for about -0.01 ratio loss */ + } + } } } + } /* for (cur = 1; cur <= last_pos; cur++) */ + + lastSequence = opt[last_pos]; + cur = last_pos > ZSTD_totalLen(lastSequence) ? 
last_pos - ZSTD_totalLen(lastSequence) : 0; /* single sequence, and it starts before `ip` */ + assert(cur < ZSTD_OPT_NUM); /* control overflow*/ + +_shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ + assert(opt[0].mlen == 0); + + /* Set the next chunk's repcodes based on the repcodes of the beginning + * of the last match, and the last sequence. This avoids us having to + * update them while traversing the sequences. + */ + if (lastSequence.mlen != 0) { + repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0); + memcpy(rep, &reps, sizeof(reps)); + } else { + memcpy(rep, opt[cur].rep, sizeof(repcodes_t)); + } + + { U32 const storeEnd = cur + 1; + U32 storeStart = storeEnd; + U32 seqPos = cur; + + DEBUGLOG(6, "start reverse traversal (last_pos:%u, cur:%u)", + last_pos, cur); (void)last_pos; + assert(storeEnd < ZSTD_OPT_NUM); + DEBUGLOG(6, "last sequence copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", + storeEnd, lastSequence.litlen, lastSequence.mlen, lastSequence.off); + opt[storeEnd] = lastSequence; + while (seqPos > 0) { + U32 const backDist = ZSTD_totalLen(opt[seqPos]); + storeStart--; + DEBUGLOG(6, "sequence from rPos=%u copied into pos=%u (llen=%u,mlen=%u,ofc=%u)", + seqPos, storeStart, opt[seqPos].litlen, opt[seqPos].mlen, opt[seqPos].off); + opt[storeStart] = opt[seqPos]; + seqPos = (seqPos > backDist) ? 
seqPos - backDist : 0; + } + + /* save sequences */ + DEBUGLOG(6, "sending selected sequences into seqStore") + { U32 storePos; + for (storePos=storeStart; storePos <= storeEnd; storePos++) { + U32 const llen = opt[storePos].litlen; + U32 const mlen = opt[storePos].mlen; + U32 const offCode = opt[storePos].off; + U32 const advance = llen + mlen; + DEBUGLOG(6, "considering seq starting at %zi, llen=%u, mlen=%u", + anchor - istart, (unsigned)llen, (unsigned)mlen); + + if (mlen==0) { /* only literals => must be last "sequence", actually starting a new stream of sequences */ + assert(storePos == storeEnd); /* must be last sequence */ + ip = anchor + llen; /* last "sequence" is a bunch of literals => don't progress anchor */ + continue; /* will finish */ + } + + assert(anchor + llen <= iend); + ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH); + anchor += advance; + ip = anchor; + } } + ZSTD_setBasePrices(optStatePtr, optLevel); + } + } /* while (ip < ilimit) */ + + /* Return the last literals size */ + return (size_t)(iend - anchor); +} + + +size_t ZSTD_compressBlock_btopt( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressBlock_btopt"); + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict); +} + + +/* used in 2-pass strategy */ +static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus) +{ + U32 s, sum=0; + assert(ZSTD_FREQ_DIV+bonus >= 0); + for (s=0; slitSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0); + optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0); + optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0); + optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0); +} + +/* ZSTD_initStats_ultra(): + * make a first compression pass, just to seed stats with more accurate 
starting values. + * only works on first block, with no dictionary and no ldm. + * this function cannot error, hence its contract must be respected. + */ +static void +ZSTD_initStats_ultra(ZSTD_matchState_t* ms, + seqStore_t* seqStore, + U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ + memcpy(tmpRep, rep, sizeof(tmpRep)); + + DEBUGLOG(4, "ZSTD_initStats_ultra (srcSize=%zu)", srcSize); + assert(ms->opt.litLengthSum == 0); /* first block */ + assert(seqStore->sequences == seqStore->sequencesStart); /* no ldm */ + assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */ + assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */ + + ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/ + + /* invalidate first scan from history */ + ZSTD_resetSeqStore(seqStore); + ms->window.base -= srcSize; + ms->window.dictLimit += (U32)srcSize; + ms->window.lowLimit = ms->window.dictLimit; + ms->nextToUpdate = ms->window.dictLimit; + + /* re-inforce weight of collected statistics */ + ZSTD_upscaleStats(&ms->opt); +} + +size_t ZSTD_compressBlock_btultra( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_btultra2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + U32 const current = (U32)((const BYTE*)src - ms->window.base); + DEBUGLOG(5, "ZSTD_compressBlock_btultra2 (srcSize=%zu)", srcSize); + + /* 2-pass strategy: + * this strategy makes a first pass over first block to collect statistics + * and seed next round's statistics with it. 
+ * After 1st pass, function forgets everything, and starts a new block. + * Consequently, this can only work if no data has been previously loaded in tables, + * aka, no dictionary, no prefix, no ldm preprocessing. + * The compression ratio gain is generally small (~0.5% on first block), + * the cost is 2x cpu time on first block. */ + assert(srcSize <= ZSTD_BLOCKSIZE_MAX); + if ( (ms->opt.litLengthSum==0) /* first block */ + && (seqStore->sequences == seqStore->sequencesStart) /* no ldm */ + && (ms->window.dictLimit == ms->window.lowLimit) /* no dictionary */ + && (current == ms->window.dictLimit) /* start of frame, nothing already loaded nor skipped */ + && (srcSize > ZSTD_PREDEF_THRESHOLD) + ) { + ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); + } + + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); +} + +size_t ZSTD_compressBlock_btopt_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_btultra_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState); +} + +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict); +} + +size_t ZSTD_compressBlock_btultra_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict); +} + +/* note : no btultra2 variant for extDict nor 
dictMatchState, + * because btultra2 is not meant to work with dictionaries + * and is only specific for the first block (no prefix) */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstd_opt.h b/src/borg/algorithms/zstd/lib/compress/zstd_opt.h new file mode 100644 index 0000000000..9aba8a9018 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstd_opt.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_OPT_H +#define ZSTD_OPT_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include "zstd_compress_internal.h" + +/* used in ZSTD_loadDictionaryContent() */ +void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend); + +size_t ZSTD_compressBlock_btopt( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + + +size_t ZSTD_compressBlock_btopt_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra_dictMatchState( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + +size_t ZSTD_compressBlock_btopt_extDict( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); +size_t ZSTD_compressBlock_btultra_extDict( + ZSTD_matchState_t* ms, 
seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize); + + /* note : no btultra2 variant for extDict nor dictMatchState, + * because btultra2 is not meant to work with dictionaries + * and is only specific for the first block (no prefix) */ + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_OPT_H */ diff --git a/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.c b/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.c new file mode 100644 index 0000000000..1e3c8fdbee --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.c @@ -0,0 +1,2143 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/* ====== Compiler specifics ====== */ +#if defined(_MSC_VER) +# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */ +#endif + + +/* ====== Constants ====== */ +#define ZSTDMT_OVERLAPLOG_DEFAULT 0 + + +/* ====== Dependencies ====== */ +#include /* memcpy, memset */ +#include /* INT_MAX, UINT_MAX */ +#include "../common/mem.h" /* MEM_STATIC */ +#include "../common/pool.h" /* threadpool */ +#include "../common/threading.h" /* mutex */ +#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */ +#include "zstd_ldm.h" +#include "zstdmt_compress.h" + +/* Guards code to support resizing the SeqPool. + * We will want to resize the SeqPool to save memory in the future. + * Until then, comment the code out since it is unused. 
+ */ +#define ZSTD_RESIZE_SEQPOOL 0 + +/* ====== Debug ====== */ +#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \ + && !defined(_MSC_VER) \ + && !defined(__MINGW32__) + +# include +# include +# include + +# define DEBUG_PRINTHEX(l,p,n) { \ + unsigned debug_u; \ + for (debug_u=0; debug_u<(n); debug_u++) \ + RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \ + RAWLOG(l, " \n"); \ +} + +static unsigned long long GetCurrentClockTimeMicroseconds(void) +{ + static clock_t _ticksPerSecond = 0; + if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK); + + { struct tms junk; clock_t newTicks = (clock_t) times(&junk); + return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); +} } + +#define MUTEX_WAIT_TIME_DLEVEL 6 +#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) { \ + if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \ + unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \ + ZSTD_pthread_mutex_lock(mutex); \ + { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \ + unsigned long long const elapsedTime = (afterTime-beforeTime); \ + if (elapsedTime > 1000) { /* or whatever threshold you like; I'm using 1 millisecond here */ \ + DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \ + elapsedTime, #mutex); \ + } } \ + } else { \ + ZSTD_pthread_mutex_lock(mutex); \ + } \ +} + +#else + +# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m) +# define DEBUG_PRINTHEX(l,p,n) {} + +#endif + + +/* ===== Buffer Pool ===== */ +/* a single Buffer Pool can be invoked from multiple threads in parallel */ + +typedef struct buffer_s { + void* start; + size_t capacity; +} buffer_t; + +static const buffer_t g_nullBuffer = { NULL, 0 }; + +typedef struct ZSTDMT_bufferPool_s { + ZSTD_pthread_mutex_t poolMutex; + size_t bufferSize; + unsigned totalBuffers; + unsigned nbBuffers; + ZSTD_customMem cMem; + buffer_t bTable[1]; /* variable size */ +} ZSTDMT_bufferPool; + +static ZSTDMT_bufferPool* 
ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem) +{ + unsigned const maxNbBuffers = 2*nbWorkers + 3; + ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc( + sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); + if (bufPool==NULL) return NULL; + if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) { + ZSTD_free(bufPool, cMem); + return NULL; + } + bufPool->bufferSize = 64 KB; + bufPool->totalBuffers = maxNbBuffers; + bufPool->nbBuffers = 0; + bufPool->cMem = cMem; + return bufPool; +} + +static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool) +{ + unsigned u; + DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool); + if (!bufPool) return; /* compatibility with free on NULL */ + for (u=0; utotalBuffers; u++) { + DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start); + ZSTD_free(bufPool->bTable[u].start, bufPool->cMem); + } + ZSTD_pthread_mutex_destroy(&bufPool->poolMutex); + ZSTD_free(bufPool, bufPool->cMem); +} + +/* only works at initialization, not during compression */ +static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool) +{ + size_t const poolSize = sizeof(*bufPool) + + (bufPool->totalBuffers - 1) * sizeof(buffer_t); + unsigned u; + size_t totalBufferSize = 0; + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); + for (u=0; utotalBuffers; u++) + totalBufferSize += bufPool->bTable[u].capacity; + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); + + return poolSize + totalBufferSize; +} + +/* ZSTDMT_setBufferSize() : + * all future buffers provided by this buffer pool will have _at least_ this size + * note : it's better for all buffers to have same size, + * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */ +static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize) +{ + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); + DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", 
(U32)bSize); + bufPool->bufferSize = bSize; + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); +} + + +static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers) +{ + unsigned const maxNbBuffers = 2*nbWorkers + 3; + if (srcBufPool==NULL) return NULL; + if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ + return srcBufPool; + /* need a larger buffer pool */ + { ZSTD_customMem const cMem = srcBufPool->cMem; + size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ + ZSTDMT_bufferPool* newBufPool; + ZSTDMT_freeBufferPool(srcBufPool); + newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + if (newBufPool==NULL) return newBufPool; + ZSTDMT_setBufferSize(newBufPool, bSize); + return newBufPool; + } +} + +/** ZSTDMT_getBuffer() : + * assumption : bufPool must be valid + * @return : a buffer, with start pointer and size + * note: allocation may fail, in this case, start==NULL and size==0 */ +static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool) +{ + size_t const bSize = bufPool->bufferSize; + DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize); + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); + if (bufPool->nbBuffers) { /* try to use an existing buffer */ + buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)]; + size_t const availBufferSize = buf.capacity; + bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer; + if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) { + /* large enough, but not too much */ + DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u", + bufPool->nbBuffers, (U32)buf.capacity); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); + return buf; + } + /* size conditions not respected : scratch this buffer, create new one */ + DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing"); + ZSTD_free(buf.start, bufPool->cMem); + } + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); + /* create new buffer */ + 
DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer"); + { buffer_t buffer; + void* const start = ZSTD_malloc(bSize, bufPool->cMem); + buffer.start = start; /* note : start can be NULL if malloc fails ! */ + buffer.capacity = (start==NULL) ? 0 : bSize; + if (start==NULL) { + DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!"); + } else { + DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize); + } + return buffer; + } +} + +#if ZSTD_RESIZE_SEQPOOL +/** ZSTDMT_resizeBuffer() : + * assumption : bufPool must be valid + * @return : a buffer that is at least the buffer pool buffer size. + * If a reallocation happens, the data in the input buffer is copied. + */ +static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer) +{ + size_t const bSize = bufPool->bufferSize; + if (buffer.capacity < bSize) { + void* const start = ZSTD_malloc(bSize, bufPool->cMem); + buffer_t newBuffer; + newBuffer.start = start; + newBuffer.capacity = start == NULL ? 0 : bSize; + if (start != NULL) { + assert(newBuffer.capacity >= buffer.capacity); + memcpy(newBuffer.start, buffer.start, buffer.capacity); + DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize); + return newBuffer; + } + DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!"); + } + return buffer; +} +#endif + +/* store buffer for later re-use, up to pool capacity */ +static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) +{ + DEBUGLOG(5, "ZSTDMT_releaseBuffer"); + if (buf.start == NULL) return; /* compatible with release on NULL */ + ZSTD_pthread_mutex_lock(&bufPool->poolMutex); + if (bufPool->nbBuffers < bufPool->totalBuffers) { + bufPool->bTable[bufPool->nbBuffers++] = buf; /* stored for later use */ + DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u", + (U32)buf.capacity, (U32)(bufPool->nbBuffers-1)); + ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); + return; + } + 
ZSTD_pthread_mutex_unlock(&bufPool->poolMutex); + /* Reached bufferPool capacity (should not happen) */ + DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing "); + ZSTD_free(buf.start, bufPool->cMem); +} + + +/* ===== Seq Pool Wrapper ====== */ + +static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0}; + +typedef ZSTDMT_bufferPool ZSTDMT_seqPool; + +static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool) +{ + return ZSTDMT_sizeof_bufferPool(seqPool); +} + +static rawSeqStore_t bufferToSeq(buffer_t buffer) +{ + rawSeqStore_t seq = {NULL, 0, 0, 0}; + seq.seq = (rawSeq*)buffer.start; + seq.capacity = buffer.capacity / sizeof(rawSeq); + return seq; +} + +static buffer_t seqToBuffer(rawSeqStore_t seq) +{ + buffer_t buffer; + buffer.start = seq.seq; + buffer.capacity = seq.capacity * sizeof(rawSeq); + return buffer; +} + +static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool) +{ + if (seqPool->bufferSize == 0) { + return kNullRawSeqStore; + } + return bufferToSeq(ZSTDMT_getBuffer(seqPool)); +} + +#if ZSTD_RESIZE_SEQPOOL +static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) +{ + return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq))); +} +#endif + +static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq) +{ + ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq)); +} + +static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) +{ + ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq)); +} + +static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) +{ + ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + if (seqPool == NULL) return NULL; + ZSTDMT_setNbSeq(seqPool, 0); + return seqPool; +} + +static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) +{ + ZSTDMT_freeBufferPool(seqPool); +} + +static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) +{ + return ZSTDMT_expandBufferPool(pool, nbWorkers); +} + 
+ +/* ===== CCtx Pool ===== */ +/* a single CCtx Pool can be invoked from multiple threads in parallel */ + +typedef struct { + ZSTD_pthread_mutex_t poolMutex; + int totalCCtx; + int availCCtx; + ZSTD_customMem cMem; + ZSTD_CCtx* cctx[1]; /* variable size */ +} ZSTDMT_CCtxPool; + +/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */ +static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool) +{ + int cid; + for (cid=0; cidtotalCCtx; cid++) + ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */ + ZSTD_pthread_mutex_destroy(&pool->poolMutex); + ZSTD_free(pool, pool->cMem); +} + +/* ZSTDMT_createCCtxPool() : + * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */ +static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers, + ZSTD_customMem cMem) +{ + ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc( + sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem); + assert(nbWorkers > 0); + if (!cctxPool) return NULL; + if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) { + ZSTD_free(cctxPool, cMem); + return NULL; + } + cctxPool->cMem = cMem; + cctxPool->totalCCtx = nbWorkers; + cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */ + cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem); + if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; } + DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers); + return cctxPool; +} + +static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool, + int nbWorkers) +{ + if (srcPool==NULL) return NULL; + if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */ + /* need a larger cctx pool */ + { ZSTD_customMem const cMem = srcPool->cMem; + ZSTDMT_freeCCtxPool(srcPool); + return ZSTDMT_createCCtxPool(nbWorkers, cMem); + } +} + +/* only works during initialization phase, not during compression */ +static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool) +{ 
+ ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); + { unsigned const nbWorkers = cctxPool->totalCCtx; + size_t const poolSize = sizeof(*cctxPool) + + (nbWorkers-1) * sizeof(ZSTD_CCtx*); + unsigned u; + size_t totalCCtxSize = 0; + for (u=0; ucctx[u]); + } + ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); + assert(nbWorkers > 0); + return poolSize + totalCCtxSize; + } +} + +static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool) +{ + DEBUGLOG(5, "ZSTDMT_getCCtx"); + ZSTD_pthread_mutex_lock(&cctxPool->poolMutex); + if (cctxPool->availCCtx) { + cctxPool->availCCtx--; + { ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx]; + ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); + return cctx; + } } + ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex); + DEBUGLOG(5, "create one more CCtx"); + return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */ +} + +static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return; /* compatibility with release on NULL */ + ZSTD_pthread_mutex_lock(&pool->poolMutex); + if (pool->availCCtx < pool->totalCCtx) + pool->cctx[pool->availCCtx++] = cctx; + else { + /* pool overflow : should not happen, since totalCCtx==nbWorkers */ + DEBUGLOG(4, "CCtx pool overflow : free cctx"); + ZSTD_freeCCtx(cctx); + } + ZSTD_pthread_mutex_unlock(&pool->poolMutex); +} + +/* ==== Serial State ==== */ + +typedef struct { + void const* start; + size_t size; +} range_t; + +typedef struct { + /* All variables in the struct are protected by mutex. */ + ZSTD_pthread_mutex_t mutex; + ZSTD_pthread_cond_t cond; + ZSTD_CCtx_params params; + ldmState_t ldmState; + XXH64_state_t xxhState; + unsigned nextJobID; + /* Protects ldmWindow. + * Must be acquired after the main mutex when acquiring both. 
+ */ + ZSTD_pthread_mutex_t ldmWindowMutex; + ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */ + ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */ +} serialState_t; + +static int +ZSTDMT_serialState_reset(serialState_t* serialState, + ZSTDMT_seqPool* seqPool, + ZSTD_CCtx_params params, + size_t jobSize, + const void* dict, size_t const dictSize, + ZSTD_dictContentType_e dictContentType) +{ + /* Adjust parameters */ + if (params.ldmParams.enableLdm) { + DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); + ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); + assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); + assert(params.ldmParams.hashRateLog < 32); + serialState->ldmState.hashPower = + ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength); + } else { + memset(¶ms.ldmParams, 0, sizeof(params.ldmParams)); + } + serialState->nextJobID = 0; + if (params.fParams.checksumFlag) + XXH64_reset(&serialState->xxhState, 0); + if (params.ldmParams.enableLdm) { + ZSTD_customMem cMem = params.customMem; + unsigned const hashLog = params.ldmParams.hashLog; + size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); + unsigned const bucketLog = + params.ldmParams.hashLog - params.ldmParams.bucketSizeLog; + size_t const bucketSize = (size_t)1 << bucketLog; + unsigned const prevBucketLog = + serialState->params.ldmParams.hashLog - + serialState->params.ldmParams.bucketSizeLog; + /* Size the seq pool tables */ + ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize)); + /* Reset the window */ + ZSTD_window_init(&serialState->ldmState.window); + /* Resize tables and output space if necessary. 
*/ + if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) { + ZSTD_free(serialState->ldmState.hashTable, cMem); + serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem); + } + if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) { + ZSTD_free(serialState->ldmState.bucketOffsets, cMem); + serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem); + } + if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets) + return 1; + /* Zero the tables */ + memset(serialState->ldmState.hashTable, 0, hashSize); + memset(serialState->ldmState.bucketOffsets, 0, bucketSize); + + /* Update window state and fill hash table with dict */ + serialState->ldmState.loadedDictEnd = 0; + if (dictSize > 0) { + if (dictContentType == ZSTD_dct_rawContent) { + BYTE const* const dictEnd = (const BYTE*)dict + dictSize; + ZSTD_window_update(&serialState->ldmState.window, dict, dictSize); + ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, ¶ms.ldmParams); + serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base); + } else { + /* don't even load anything */ + } + } + + /* Initialize serialState's copy of ldmWindow. 
*/ + serialState->ldmWindow = serialState->ldmState.window; + } + + serialState->params = params; + serialState->params.jobSize = (U32)jobSize; + return 0; +} + +static int ZSTDMT_serialState_init(serialState_t* serialState) +{ + int initError = 0; + memset(serialState, 0, sizeof(*serialState)); + initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL); + initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL); + initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL); + initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL); + return initError; +} + +static void ZSTDMT_serialState_free(serialState_t* serialState) +{ + ZSTD_customMem cMem = serialState->params.customMem; + ZSTD_pthread_mutex_destroy(&serialState->mutex); + ZSTD_pthread_cond_destroy(&serialState->cond); + ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex); + ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond); + ZSTD_free(serialState->ldmState.hashTable, cMem); + ZSTD_free(serialState->ldmState.bucketOffsets, cMem); +} + +static void ZSTDMT_serialState_update(serialState_t* serialState, + ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore, + range_t src, unsigned jobID) +{ + /* Wait for our turn */ + ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); + while (serialState->nextJobID < jobID) { + DEBUGLOG(5, "wait for serialState->cond"); + ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex); + } + /* A future job may error and skip our job */ + if (serialState->nextJobID == jobID) { + /* It is now our turn, do any processing necessary */ + if (serialState->params.ldmParams.enableLdm) { + size_t error; + assert(seqStore.seq != NULL && seqStore.pos == 0 && + seqStore.size == 0 && seqStore.capacity > 0); + assert(src.size <= serialState->params.jobSize); + ZSTD_window_update(&serialState->ldmState.window, src.start, src.size); + error = ZSTD_ldm_generateSequences( + &serialState->ldmState, &seqStore, + &serialState->params.ldmParams, src.start, 
src.size); + /* We provide a large enough buffer to never fail. */ + assert(!ZSTD_isError(error)); (void)error; + /* Update ldmWindow to match the ldmState.window and signal the main + * thread if it is waiting for a buffer. + */ + ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); + serialState->ldmWindow = serialState->ldmState.window; + ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); + ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); + } + if (serialState->params.fParams.checksumFlag && src.size > 0) + XXH64_update(&serialState->xxhState, src.start, src.size); + } + /* Now it is the next jobs turn */ + serialState->nextJobID++; + ZSTD_pthread_cond_broadcast(&serialState->cond); + ZSTD_pthread_mutex_unlock(&serialState->mutex); + + if (seqStore.size > 0) { + size_t const err = ZSTD_referenceExternalSequences( + jobCCtx, seqStore.seq, seqStore.size); + assert(serialState->params.ldmParams.enableLdm); + assert(!ZSTD_isError(err)); + (void)err; + } +} + +static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState, + unsigned jobID, size_t cSize) +{ + ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex); + if (serialState->nextJobID <= jobID) { + assert(ZSTD_isError(cSize)); (void)cSize; + DEBUGLOG(5, "Skipping past job %u because of error", jobID); + serialState->nextJobID = jobID + 1; + ZSTD_pthread_cond_broadcast(&serialState->cond); + + ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex); + ZSTD_window_clear(&serialState->ldmWindow); + ZSTD_pthread_cond_signal(&serialState->ldmWindowCond); + ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex); + } + ZSTD_pthread_mutex_unlock(&serialState->mutex); + +} + + +/* ------------------------------------------ */ +/* ===== Worker thread ===== */ +/* ------------------------------------------ */ + +static const range_t kNullRange = { NULL, 0 }; + +typedef struct { + size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ + size_t cSize; /* SHARED - set0 by 
mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */ + ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */ + ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */ + ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */ + ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */ + ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */ + serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */ + buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */ + range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */ + range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */ + unsigned jobID; /* set by mtctx, then read by worker => no barrier */ + unsigned firstJob; /* set by mtctx, then read by worker => no barrier */ + unsigned lastJob; /* set by mtctx, then read by worker => no barrier */ + ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */ + const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */ + unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */ + size_t dstFlushed; /* used only by mtctx */ + unsigned frameChecksumNeeded; /* used only by mtctx */ +} ZSTDMT_jobDescription; + +#define JOB_ERROR(e) { \ + ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \ + job->cSize = e; \ + ZSTD_pthread_mutex_unlock(&job->job_mutex); \ + goto _endJob; \ +} + +/* ZSTDMT_compressionJob() is a POOL_function type */ +static void ZSTDMT_compressionJob(void* jobDescription) +{ + ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription; + ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! 
copy it, modify the copy */ + ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool); + rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool); + buffer_t dstBuff = job->dstBuff; + size_t lastCBlockSize = 0; + + /* resources */ + if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation)); + if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */ + dstBuff = ZSTDMT_getBuffer(job->bufPool); + if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); + job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ + } + if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) + JOB_ERROR(ERROR(memory_allocation)); + + /* Don't compute the checksum for chunks, since we compute it externally, + * but write it in the header. + */ + if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; + /* Don't run LDM for the chunks, since we handle it externally */ + jobParams.ldmParams.enableLdm = 0; + + + /* init */ + if (job->cdict) { + size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize); + assert(job->firstJob); /* only allowed for first job */ + if (ZSTD_isError(initError)) JOB_ERROR(initError); + } else { /* srcStart points at reloaded section */ + U64 const pledgedSrcSize = job->firstJob ? 
job->fullFrameSize : job->src.size; + { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob); + if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError); + } + { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, + job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */ + ZSTD_dtlm_fast, + NULL, /*cdict*/ + &jobParams, pledgedSrcSize); + if (ZSTD_isError(initError)) JOB_ERROR(initError); + } } + + /* Perform serial step as early as possible, but after CCtx initialization */ + ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID); + + if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */ + size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0); + if (ZSTD_isError(hSize)) JOB_ERROR(hSize); + DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize); + ZSTD_invalidateRepCodes(cctx); + } + + /* compress */ + { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX; + int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize); + const BYTE* ip = (const BYTE*) job->src.start; + BYTE* const ostart = (BYTE*)dstBuff.start; + BYTE* op = ostart; + BYTE* oend = op + dstBuff.capacity; + int chunkNb; + if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */ + DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks); + assert(job->cSize == 0); + for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) { + size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); + if (ZSTD_isError(cSize)) JOB_ERROR(cSize); + ip += chunkSize; + op += cSize; assert(op < oend); + /* stats */ + ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); + job->cSize += cSize; + job->consumed = chunkSize * 
chunkNb; + DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)", + (U32)cSize, (U32)job->cSize); + ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */ + ZSTD_pthread_mutex_unlock(&job->job_mutex); + } + /* last block */ + assert(chunkSize > 0); + assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */ + if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) { + size_t const lastBlockSize1 = job->src.size & (chunkSize-1); + size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1; + size_t const cSize = (job->lastJob) ? + ZSTD_compressEnd (cctx, op, oend-op, ip, lastBlockSize) : + ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize); + if (ZSTD_isError(cSize)) JOB_ERROR(cSize); + lastCBlockSize = cSize; + } } + +_endJob: + ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); + if (job->prefix.size > 0) + DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start); + DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start); + /* release resources */ + ZSTDMT_releaseSeq(job->seqPool, rawSeqStore); + ZSTDMT_releaseCCtx(job->cctxPool, cctx); + /* report */ + ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); + if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); + job->cSize += lastCBlockSize; + job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */ + ZSTD_pthread_cond_signal(&job->job_cond); + ZSTD_pthread_mutex_unlock(&job->job_mutex); +} + + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ + +typedef struct { + range_t prefix; /* read-only non-owned prefix buffer */ + buffer_t buffer; + size_t filled; +} inBuff_t; + +typedef struct { + BYTE* buffer; /* The round input buffer. 
All jobs get references + * to pieces of the buffer. ZSTDMT_tryGetInputRange() + * handles handing out job input buffers, and makes + * sure it doesn't overlap with any pieces still in use. + */ + size_t capacity; /* The capacity of buffer. */ + size_t pos; /* The position of the current inBuff in the round + * buffer. Updated past the end if the inBuff once + * the inBuff is sent to the worker thread. + * pos <= capacity. + */ +} roundBuff_t; + +static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; + +#define RSYNC_LENGTH 32 + +typedef struct { + U64 hash; + U64 hitMask; + U64 primePower; +} rsyncState_t; + +struct ZSTDMT_CCtx_s { + POOL_ctx* factory; + ZSTDMT_jobDescription* jobs; + ZSTDMT_bufferPool* bufPool; + ZSTDMT_CCtxPool* cctxPool; + ZSTDMT_seqPool* seqPool; + ZSTD_CCtx_params params; + size_t targetSectionSize; + size_t targetPrefixSize; + int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */ + inBuff_t inBuff; + roundBuff_t roundBuff; + serialState_t serial; + rsyncState_t rsync; + unsigned singleBlockingThread; + unsigned jobIDMask; + unsigned doneJobID; + unsigned nextJobID; + unsigned frameEnded; + unsigned allJobsCompleted; + unsigned long long frameContentSize; + unsigned long long consumed; + unsigned long long produced; + ZSTD_customMem cMem; + ZSTD_CDict* cdictLocal; + const ZSTD_CDict* cdict; +}; + +static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem) +{ + U32 jobNb; + if (jobTable == NULL) return; + for (jobNb=0; jobNb mtctx->jobIDMask+1) { /* need more job capacity */ + ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); + mtctx->jobIDMask = 0; + mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem); + if (mtctx->jobs==NULL) return ERROR(memory_allocation); + assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */ + mtctx->jobIDMask = nbJobs - 1; + } + return 0; +} + + +/* 
ZSTDMT_CCtxParam_setNbWorkers(): + * Internal use only */ +size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers) +{ + return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers); +} + +MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem) +{ + ZSTDMT_CCtx* mtctx; + U32 nbJobs = nbWorkers + 2; + int initError; + DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers); + + if (nbWorkers < 1) return NULL; + nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX); + if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL)) + /* invalid custom allocator */ + return NULL; + + mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem); + if (!mtctx) return NULL; + ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); + mtctx->cMem = cMem; + mtctx->allJobsCompleted = 1; + mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem); + mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); + assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ + mtctx->jobIDMask = nbJobs - 1; + mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); + mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); + initError = ZSTDMT_serialState_init(&mtctx->serial); + mtctx->roundBuff = kNullRoundBuff; + if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) { + ZSTDMT_freeCCtx(mtctx); + return NULL; + } + DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers); + return mtctx; +} + +ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem) +{ +#ifdef ZSTD_MULTITHREAD + return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem); +#else + (void)nbWorkers; + (void)cMem; + return NULL; +#endif +} + +ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers) +{ + return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem); +} + + 
+/* ZSTDMT_releaseAllJobResources() : + * note : ensure all workers are killed first ! */ +static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx) +{ + unsigned jobID; + DEBUGLOG(3, "ZSTDMT_releaseAllJobResources"); + for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) { + /* Copy the mutex/cond out */ + ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex; + ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond; + + DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start); + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff); + + /* Clear the job description, but keep the mutex/cond */ + memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID])); + mtctx->jobs[jobID].job_mutex = mutex; + mtctx->jobs[jobID].job_cond = cond; + } + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + mtctx->allJobsCompleted = 1; +} + +static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx) +{ + DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted"); + while (mtctx->doneJobID < mtctx->nextJobID) { + unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask; + ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex); + while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) { + DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */ + ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex); + } + ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex); + mtctx->doneJobID++; + } +} + +size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx) +{ + if (mtctx==NULL) return 0; /* compatible with free on NULL */ + POOL_free(mtctx->factory); /* stop and free worker threads */ + ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */ + ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem); + ZSTDMT_freeBufferPool(mtctx->bufPool); + 
ZSTDMT_freeCCtxPool(mtctx->cctxPool); + ZSTDMT_freeSeqPool(mtctx->seqPool); + ZSTDMT_serialState_free(&mtctx->serial); + ZSTD_freeCDict(mtctx->cdictLocal); + if (mtctx->roundBuff.buffer) + ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); + ZSTD_free(mtctx, mtctx->cMem); + return 0; +} + +size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx) +{ + if (mtctx == NULL) return 0; /* supports sizeof NULL */ + return sizeof(*mtctx) + + POOL_sizeof(mtctx->factory) + + ZSTDMT_sizeof_bufferPool(mtctx->bufPool) + + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription) + + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool) + + ZSTDMT_sizeof_seqPool(mtctx->seqPool) + + ZSTD_sizeof_CDict(mtctx->cdictLocal) + + mtctx->roundBuff.capacity; +} + +/* Internal only */ +size_t +ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, + ZSTDMT_parameter parameter, + int value) +{ + DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter"); + switch(parameter) + { + case ZSTDMT_p_jobSize : + DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value); + return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value); + case ZSTDMT_p_overlapLog : + DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value); + return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value); + case ZSTDMT_p_rsyncable : + DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value); + return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value); + default : + return ERROR(parameter_unsupported); + } +} + +size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value) +{ + DEBUGLOG(4, "ZSTDMT_setMTCtxParameter"); + return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value); +} + +size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value) +{ + switch (parameter) { + case ZSTDMT_p_jobSize: + return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value); + case ZSTDMT_p_overlapLog: + return ZSTD_CCtxParams_getParameter(&mtctx->params, 
ZSTD_c_overlapLog, value); + case ZSTDMT_p_rsyncable: + return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value); + default: + return ERROR(parameter_unsupported); + } +} + +/* Sets parameters relevant to the compression job, + * initializing others to default values. */ +static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params) +{ + ZSTD_CCtx_params jobParams = *params; + /* Clear parameters related to multithreading */ + jobParams.forceWindow = 0; + jobParams.nbWorkers = 0; + jobParams.jobSize = 0; + jobParams.overlapLog = 0; + jobParams.rsyncable = 0; + memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t)); + memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem)); + return jobParams; +} + + +/* ZSTDMT_resize() : + * @return : error code if fails, 0 on success */ +static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) +{ + if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); + FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); + mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers); + if (mtctx->bufPool == NULL) return ERROR(memory_allocation); + mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); + if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); + mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers); + if (mtctx->seqPool == NULL) return ERROR(memory_allocation); + ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers); + return 0; +} + + +/*! ZSTDMT_updateCParams_whileCompressing() : + * Updates a selected set of compression parameters, remaining compatible with currently active frame. + * New parameters will be applied to next compression job. 
*/ +void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams) +{ + U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */ + int const compressionLevel = cctxParams->compressionLevel; + DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)", + compressionLevel); + mtctx->params.compressionLevel = compressionLevel; + { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0); + cParams.windowLog = saved_wlog; + mtctx->params.cParams = cParams; + } +} + +/* ZSTDMT_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads. + * Note : mutex will be acquired during statistics collection inside workers. */ +ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx) +{ + ZSTD_frameProgression fps; + DEBUGLOG(5, "ZSTDMT_getFrameProgression"); + fps.ingested = mtctx->consumed + mtctx->inBuff.filled; + fps.consumed = mtctx->consumed; + fps.produced = fps.flushed = mtctx->produced; + fps.currentJobID = mtctx->nextJobID; + fps.nbActiveWorkers = 0; + { unsigned jobNb; + unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1); + DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)", + mtctx->doneJobID, lastJobNb, mtctx->jobReady) + for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) { + unsigned const wJobID = jobNb & mtctx->jobIDMask; + ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID]; + ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); + { size_t const cResult = jobPtr->cSize; + size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; + size_t const flushed = ZSTD_isError(cResult) ? 
0 : jobPtr->dstFlushed; + assert(flushed <= produced); + fps.ingested += jobPtr->src.size; + fps.consumed += jobPtr->consumed; + fps.produced += produced; + fps.flushed += flushed; + fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size); + } + ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); + } + } + return fps; +} + + +size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) +{ + size_t toFlush; + unsigned const jobID = mtctx->doneJobID; + assert(jobID <= mtctx->nextJobID); + if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */ + + /* look into oldest non-fully-flushed job */ + { unsigned const wJobID = jobID & mtctx->jobIDMask; + ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID]; + ZSTD_pthread_mutex_lock(&jobPtr->job_mutex); + { size_t const cResult = jobPtr->cSize; + size_t const produced = ZSTD_isError(cResult) ? 0 : cResult; + size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed; + assert(flushed <= produced); + assert(jobPtr->consumed <= jobPtr->src.size); + toFlush = produced - flushed; + /* if toFlush==0, nothing is available to flush. + * However, jobID is expected to still be active: + * if jobID was already completed and fully flushed, + * ZSTDMT_flushProduced() should have already moved onto next job. + * Therefore, some input has not yet been consumed. */ + if (toFlush==0) { + assert(jobPtr->consumed < jobPtr->src.size); + } + } + ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); + } + + return toFlush; +} + + +/* ------------------------------------------ */ +/* ===== Multi-threaded compression ===== */ +/* ------------------------------------------ */ + +static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) +{ + unsigned jobLog; + if (params->ldmParams.enableLdm) { + /* In Long Range Mode, the windowLog is typically oversized. + * In which case, it's preferable to determine the jobSize + * based on chainLog instead. 
*/ + jobLog = MAX(21, params->cParams.chainLog + 4); + } else { + jobLog = MAX(20, params->cParams.windowLog + 2); + } + return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX); +} + +static int ZSTDMT_overlapLog_default(ZSTD_strategy strat) +{ + switch(strat) + { + case ZSTD_btultra2: + return 9; + case ZSTD_btultra: + case ZSTD_btopt: + return 8; + case ZSTD_btlazy2: + case ZSTD_lazy2: + return 7; + case ZSTD_lazy: + case ZSTD_greedy: + case ZSTD_dfast: + case ZSTD_fast: + default:; + } + return 6; +} + +static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat) +{ + assert(0 <= ovlog && ovlog <= 9); + if (ovlog == 0) return ZSTDMT_overlapLog_default(strat); + return ovlog; +} + +static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) +{ + int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); + int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); + assert(0 <= overlapRLog && overlapRLog <= 8); + if (params->ldmParams.enableLdm) { + /* In Long Range Mode, the windowLog is typically oversized. + * In which case, it's preferable to determine the jobSize + * based on chainLog instead. + * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */ + ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) + - overlapRLog; + } + assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX); + DEBUGLOG(4, "overlapLog : %i", params->overlapLog); + DEBUGLOG(4, "overlap size : %i", 1 << ovLog); + return (ovLog==0) ? 
0 : (size_t)1 << ovLog;
+}
+
+static unsigned
+ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
+{
+    assert(nbWorkers>0);
+    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
+        size_t const jobMaxSize = jobSizeTarget << 2;
+        size_t const passSizeMax = jobMaxSize * nbWorkers;
+        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
+        unsigned const nbJobsLarge = multiplier * nbWorkers;
+        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
+        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
+        return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
+}   }
+
+/* ZSTDMT_compress_advanced_internal() :
+ * This is a blocking function : it will only give back control to caller after finishing its compression job.
+ */
+static size_t
+ZSTDMT_compress_advanced_internal(
+                ZSTDMT_CCtx* mtctx,
+                void* dst, size_t dstCapacity,
+          const void* src, size_t srcSize,
+          const ZSTD_CDict* cdict,
+                ZSTD_CCtx_params params)
+{
+    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);
+    size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);
+    unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);
+    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
+    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
+    const char* const srcStart = (const char*)src;
+    size_t remainingSrcSize = srcSize;
+    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ?
nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */
+    size_t frameStartPos = 0, dstBufferPos = 0;
+    assert(jobParams.nbWorkers == 0);
+    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
+
+    params.jobSize = (U32)avgJobSize;
+    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
+                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
+
+    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
+        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
+        DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
+        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
+        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
+    }
+
+    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
+    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
+    /* LDM doesn't even try to load the dictionary in single-ingestion mode */
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize, NULL, 0, ZSTD_dct_auto))
+        return ERROR(memory_allocation);
+
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) , "");  /* only expands if necessary */
+
+    {   unsigned u;
+        for (u=0; u<nbJobs; u++) {
+            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
+            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
+            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
+            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
+            size_t dictSize = u ? overlapSize : 0;
+
+            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
+            mtctx->jobs[u].prefix.size = dictSize;
+            mtctx->jobs[u].src.start = srcStart + frameStartPos;
+            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
+            mtctx->jobs[u].consumed = 0;
+            mtctx->jobs[u].cSize = 0;
+            mtctx->jobs[u].cdict = (u==0) ?
cdict : NULL;
+            mtctx->jobs[u].fullFrameSize = srcSize;
+            mtctx->jobs[u].params = jobParams;
+            /* do not calculate checksum within sections, but write it in header for first section */
+            mtctx->jobs[u].dstBuff = dstBuffer;
+            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
+            mtctx->jobs[u].bufPool = mtctx->bufPool;
+            mtctx->jobs[u].seqPool = mtctx->seqPool;
+            mtctx->jobs[u].serial = &mtctx->serial;
+            mtctx->jobs[u].jobID = u;
+            mtctx->jobs[u].firstJob = (u==0);
+            mtctx->jobs[u].lastJob = (u==nbJobs-1);
+
+            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)jobSize);
+            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
+            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
+
+            frameStartPos += jobSize;
+            dstBufferPos += dstBufferCapacity;
+            remainingSrcSize -= jobSize;
+    }   }
+
+    /* collect result */
+    {   size_t error = 0, dstPos = 0;
+        unsigned jobID;
+        for (jobID=0; jobID<nbJobs; jobID++) {
+            DEBUGLOG(5, "waiting for job %u ", jobID);
+            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+                DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
+                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
+            }
+            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+            DEBUGLOG(5, "ready to write job %u ", jobID);
+
+            {   size_t const cSize = mtctx->jobs[jobID].cSize;
+                if (ZSTD_isError(cSize)) error = cSize;
+                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
+                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
+                    if (!error)
+                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
+                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
+                        DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
+                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
+                }   }
+                mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+
mtctx->jobs[jobID].cSize = 0;
+                dstPos += cSize ;
+            }
+        }  /* for (jobID=0; jobID<nbJobs; jobID++) */
+
+        /* write checksum at end of last section */
+        if (params.fParams.checksumFlag) {
+            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+            if (dstPos + 4 > dstCapacity) {
+                error = ERROR(dstSize_tooSmall);
+            } else {
+                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
+                MEM_writeLE32((char*)dst + dstPos, checksum);
+                dstPos += 4;
+        }   }
+
+        if (!error) DEBUGLOG(4, "compressed size : %u  ", (U32)dstPos);
+        return error ? error : dstPos;
+    }
+}
+
+size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                                void* dst, size_t dstCapacity,
+                          const void* src, size_t srcSize,
+                          const ZSTD_CDict* cdict,
+                                ZSTD_parameters params,
+                                int overlapLog)
+{
+    ZSTD_CCtx_params cctxParams = mtctx->params;
+    cctxParams.cParams = params.cParams;
+    cctxParams.fParams = params.fParams;
+    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
+    cctxParams.overlapLog = overlapLog;
+    return ZSTDMT_compress_advanced_internal(mtctx,
+                                             dst, dstCapacity,
+                                             src, srcSize,
+                                             cdict, cctxParams);
+}
+
+
+size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
+                           void* dst, size_t dstCapacity,
+                     const void* src, size_t srcSize,
+                           int compressionLevel)
+{
+    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
+    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
+    params.fParams.contentSizeFlag = 1;
+    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
+}
+
+
+/* ====================================== */
+/* =======      Streaming API     ======= */
+/* ====================================== */
+
+size_t ZSTDMT_initCStream_internal(
+        ZSTDMT_CCtx* mtctx,
+        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
+        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
+        unsigned long long pledgedSrcSize)
+{
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
+                (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
+
+    /* params supposed partially fully validated at this point */
+
assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
+    assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
+
+    /* init */
+    if (params.nbWorkers != mtctx->params.nbWorkers)
+        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");
+
+    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
+    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
+
+    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
+    if (mtctx->singleBlockingThread) {
+        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);
+        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
+        assert(singleThreadParams.nbWorkers == 0);
+        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
+                                         dict, dictSize, cdict,
+                                         &singleThreadParams, pledgedSrcSize);
+    }
+
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
+
+    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
+        ZSTDMT_waitForAllJobsCompleted(mtctx);
+        ZSTDMT_releaseAllJobResources(mtctx);
+        mtctx->allJobsCompleted = 1;
+    }
+
+    mtctx->params = params;
+    mtctx->frameContentSize = pledgedSrcSize;
+    if (dict) {
+        ZSTD_freeCDict(mtctx->cdictLocal);
+        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+                                                    ZSTD_dlm_byCopy, dictContentType,    /* note : a loadPrefix becomes an internal CDict */
+                                                    params.cParams, mtctx->cMem);
+        mtctx->cdict = mtctx->cdictLocal;
+        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
+    } else {
+        ZSTD_freeCDict(mtctx->cdictLocal);
+        mtctx->cdictLocal = NULL;
+        mtctx->cdict = cdict;
+    }
+
+    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
+    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
+    mtctx->targetSectionSize = params.jobSize;
+    if
(mtctx->targetSectionSize == 0) {
+        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
+    }
+    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
+
+    if (params.rsyncable) {
+        /* Aim for the targetsectionSize as the average job size. */
+        U32 const jobSizeMB = (U32)(mtctx->targetSectionSize >> 20);
+        U32 const rsyncBits = ZSTD_highbit32(jobSizeMB) + 20;
+        assert(jobSizeMB >= 1);
+        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
+        mtctx->rsync.hash = 0;
+        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
+        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
+    }
+    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
+    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
+    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
+    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
+    {
+        /* If ldm is enabled we need windowSize space. */
+        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
+        /* Two buffers of slack, plus extra space for the overlap
+         * This is the minimum slack that LDM works with. One extra because
+         * flush might waste up to targetSectionSize-1 bytes. Another extra
+         * for the overlap (if > 0), then one to fill which doesn't overlap
+         * with the LDM window.
+ */ + size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0); + size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers; + /* Compute the total size, and always have enough slack */ + size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1); + size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers; + size_t const capacity = MAX(windowSize, sectionsSize) + slackSize; + if (mtctx->roundBuff.capacity < capacity) { + if (mtctx->roundBuff.buffer) + ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem); + mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem); + if (mtctx->roundBuff.buffer == NULL) { + mtctx->roundBuff.capacity = 0; + return ERROR(memory_allocation); + } + mtctx->roundBuff.capacity = capacity; + } + } + DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10)); + mtctx->roundBuff.pos = 0; + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + mtctx->inBuff.prefix = kNullRange; + mtctx->doneJobID = 0; + mtctx->nextJobID = 0; + mtctx->frameEnded = 0; + mtctx->allJobsCompleted = 0; + mtctx->consumed = 0; + mtctx->produced = 0; + if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize, + dict, dictSize, dictContentType)) + return ERROR(memory_allocation); + return 0; +} + +size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, + unsigned long long pledgedSrcSize) +{ + ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ + DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize); + cctxParams.cParams = params.cParams; + cctxParams.fParams = params.fParams; + return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL, + cctxParams, pledgedSrcSize); +} + +size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, + const ZSTD_CDict* cdict, + ZSTD_frameParameters fParams, + unsigned long long pledgedSrcSize) +{ + 
ZSTD_CCtx_params cctxParams = mtctx->params; + if (cdict==NULL) return ERROR(dictionary_wrong); /* method incompatible with NULL cdict */ + cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict); + cctxParams.fParams = fParams; + return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict, + cctxParams, pledgedSrcSize); +} + + +/* ZSTDMT_resetCStream() : + * pledgedSrcSize can be zero == unknown (for the time being) + * prefer using ZSTD_CONTENTSIZE_UNKNOWN, + * as `0` might mean "empty" in the future */ +size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize) +{ + if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; + return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params, + pledgedSrcSize); +} + +size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) { + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0); + ZSTD_CCtx_params cctxParams = mtctx->params; /* retrieve sticky params */ + DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel); + cctxParams.cParams = params.cParams; + cctxParams.fParams = params.fParams; + return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN); +} + + +/* ZSTDMT_writeLastEmptyBlock() + * Write a single empty block with an end-of-frame to finish a frame. + * Job must be created from streaming variant. + * This function is always successful if expected conditions are fulfilled. 
+ */ +static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job) +{ + assert(job->lastJob == 1); + assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */ + assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */ + assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */ + job->dstBuff = ZSTDMT_getBuffer(job->bufPool); + if (job->dstBuff.start == NULL) { + job->cSize = ERROR(memory_allocation); + return; + } + assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */ + job->src = kNullRange; + job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); + assert(!ZSTD_isError(job->cSize)); + assert(job->consumed == 0); +} + +static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp) +{ + unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask; + int const endFrame = (endOp == ZSTD_e_end); + + if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) { + DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full"); + assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask)); + return 0; + } + + if (!mtctx->jobReady) { + BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start; + DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ", + mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size); + mtctx->jobs[jobID].src.start = src; + mtctx->jobs[jobID].src.size = srcSize; + assert(mtctx->inBuff.filled >= srcSize); + mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix; + mtctx->jobs[jobID].consumed = 0; + mtctx->jobs[jobID].cSize = 0; + mtctx->jobs[jobID].params = mtctx->params; + mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? 
mtctx->cdict : NULL; + mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize; + mtctx->jobs[jobID].dstBuff = g_nullBuffer; + mtctx->jobs[jobID].cctxPool = mtctx->cctxPool; + mtctx->jobs[jobID].bufPool = mtctx->bufPool; + mtctx->jobs[jobID].seqPool = mtctx->seqPool; + mtctx->jobs[jobID].serial = &mtctx->serial; + mtctx->jobs[jobID].jobID = mtctx->nextJobID; + mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0); + mtctx->jobs[jobID].lastJob = endFrame; + mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0); + mtctx->jobs[jobID].dstFlushed = 0; + + /* Update the round buffer pos and clear the input buffer to be reset */ + mtctx->roundBuff.pos += srcSize; + mtctx->inBuff.buffer = g_nullBuffer; + mtctx->inBuff.filled = 0; + /* Set the prefix */ + if (!endFrame) { + size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize); + mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize; + mtctx->inBuff.prefix.size = newPrefixSize; + } else { /* endFrame==1 => no need for another input buffer */ + mtctx->inBuff.prefix = kNullRange; + mtctx->frameEnded = endFrame; + if (mtctx->nextJobID == 0) { + /* single job exception : checksum is already calculated directly within worker thread */ + mtctx->params.fParams.checksumFlag = 0; + } } + + if ( (srcSize == 0) + && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) { + DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame"); + assert(endOp == ZSTD_e_end); /* only possible case : need to end the frame with an empty last block */ + ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID); + mtctx->nextJobID++; + return 0; + } + } + + DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))", + mtctx->nextJobID, + (U32)mtctx->jobs[jobID].src.size, + mtctx->jobs[jobID].lastJob, + mtctx->nextJobID, + jobID); + if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) { 
+ mtctx->nextJobID++; + mtctx->jobReady = 0; + } else { + DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID); + mtctx->jobReady = 1; + } + return 0; +} + + +/*! ZSTDMT_flushProduced() : + * flush whatever data has been produced but not yet flushed in current job. + * move to next job if current one is fully flushed. + * `output` : `pos` will be updated with amount of data flushed . + * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush . + * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */ +static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end) +{ + unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask; + DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)", + blockToFlush, mtctx->doneJobID, mtctx->nextJobID); + assert(output->size >= output->pos); + + ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); + if ( blockToFlush + && (mtctx->doneJobID < mtctx->nextJobID) ) { + assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); + while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ + if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) { + DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none", + mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size); + break; + } + DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)", + mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); + ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex); /* block when nothing to flush but some to come */ + } } + + /* try to flush something */ + { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ + size_t const srcConsumed = 
mtctx->jobs[wJobID].consumed; /* shared */ + size_t const srcSize = mtctx->jobs[wJobID].src.size; /* read-only, could be done after mutex lock, but no-declaration-after-statement */ + ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); + if (ZSTD_isError(cSize)) { + DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s", + mtctx->doneJobID, ZSTD_getErrorName(cSize)); + ZSTDMT_waitForAllJobsCompleted(mtctx); + ZSTDMT_releaseAllJobResources(mtctx); + return cSize; + } + /* add frame checksum if necessary (can only happen once) */ + assert(srcConsumed <= srcSize); + if ( (srcConsumed == srcSize) /* job completed -> worker no longer active */ + && mtctx->jobs[wJobID].frameChecksumNeeded ) { + U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState); + DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum); + MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); + cSize += 4; + mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ + mtctx->jobs[wJobID].frameChecksumNeeded = 0; + } + + if (cSize > 0) { /* compression is ongoing or completed */ + size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); + DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)", + (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); + assert(mtctx->doneJobID < mtctx->nextJobID); + assert(cSize >= mtctx->jobs[wJobID].dstFlushed); + assert(mtctx->jobs[wJobID].dstBuff.start != NULL); + if (toFlush > 0) { + memcpy((char*)output->dst + output->pos, + (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed, + toFlush); + } + output->pos += toFlush; + mtctx->jobs[wJobID].dstFlushed += toFlush; /* can write : this value is only used by mtctx */ + + if ( (srcConsumed == srcSize) /* job is completed */ + && 
(mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ + DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one", + mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed); + ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff); + DEBUGLOG(5, "dstBuffer released"); + mtctx->jobs[wJobID].dstBuff = g_nullBuffer; + mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future check */ + mtctx->consumed += srcSize; + mtctx->produced += cSize; + mtctx->doneJobID++; + } } + + /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */ + if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); + if (srcSize > srcConsumed) return 1; /* current job not completely compressed */ + } + if (mtctx->doneJobID < mtctx->nextJobID) return 1; /* some more jobs ongoing */ + if (mtctx->jobReady) return 1; /* one job is ready to push, just not yet in the list */ + if (mtctx->inBuff.filled > 0) return 1; /* input is not empty, and still needs to be converted into a job */ + mtctx->allJobsCompleted = mtctx->frameEnded; /* all jobs are entirely flushed => if this one is last one, frame is completed */ + if (end == ZSTD_e_end) return !mtctx->frameEnded; /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */ + return 0; /* internal buffers fully flushed */ +} + +/** + * Returns the range of data used by the earliest job that is not yet complete. + * If the data of the first job is broken up into two segments, we cover both + * sections. 
+ */ +static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) +{ + unsigned const firstJobID = mtctx->doneJobID; + unsigned const lastJobID = mtctx->nextJobID; + unsigned jobID; + + for (jobID = firstJobID; jobID < lastJobID; ++jobID) { + unsigned const wJobID = jobID & mtctx->jobIDMask; + size_t consumed; + + ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex); + consumed = mtctx->jobs[wJobID].consumed; + ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex); + + if (consumed < mtctx->jobs[wJobID].src.size) { + range_t range = mtctx->jobs[wJobID].prefix; + if (range.size == 0) { + /* Empty prefix */ + range = mtctx->jobs[wJobID].src; + } + /* Job source in multiple segments not supported yet */ + assert(range.start <= mtctx->jobs[wJobID].src.start); + return range; + } + } + return kNullRange; +} + +/** + * Returns non-zero iff buffer and range overlap. + */ +static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) +{ + BYTE const* const bufferStart = (BYTE const*)buffer.start; + BYTE const* const bufferEnd = bufferStart + buffer.capacity; + BYTE const* const rangeStart = (BYTE const*)range.start; + BYTE const* const rangeEnd = range.size != 0 ? 
rangeStart + range.size : rangeStart; + + if (rangeStart == NULL || bufferStart == NULL) + return 0; + /* Empty ranges cannot overlap */ + if (bufferStart == bufferEnd || rangeStart == rangeEnd) + return 0; + + return bufferStart < rangeEnd && rangeStart < bufferEnd; +} + +static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) +{ + range_t extDict; + range_t prefix; + + DEBUGLOG(5, "ZSTDMT_doesOverlapWindow"); + extDict.start = window.dictBase + window.lowLimit; + extDict.size = window.dictLimit - window.lowLimit; + + prefix.start = window.base + window.dictLimit; + prefix.size = window.nextSrc - (window.base + window.dictLimit); + DEBUGLOG(5, "extDict [0x%zx, 0x%zx)", + (size_t)extDict.start, + (size_t)extDict.start + extDict.size); + DEBUGLOG(5, "prefix [0x%zx, 0x%zx)", + (size_t)prefix.start, + (size_t)prefix.start + prefix.size); + + return ZSTDMT_isOverlapped(buffer, extDict) + || ZSTDMT_isOverlapped(buffer, prefix); +} + +static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) +{ + if (mtctx->params.ldmParams.enableLdm) { + ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; + DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); + DEBUGLOG(5, "source [0x%zx, 0x%zx)", + (size_t)buffer.start, + (size_t)buffer.start + buffer.capacity); + ZSTD_PTHREAD_MUTEX_LOCK(mutex); + while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) { + DEBUGLOG(5, "Waiting for LDM to finish..."); + ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex); + } + DEBUGLOG(6, "Done waiting for LDM to finish"); + ZSTD_pthread_mutex_unlock(mutex); + } +} + +/** + * Attempts to set the inBuff to the next section to fill. + * If any part of the new section is still in use we give up. + * Returns non-zero if the buffer is filled. 
+ */ +static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx) +{ + range_t const inUse = ZSTDMT_getInputDataInUse(mtctx); + size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos; + size_t const target = mtctx->targetSectionSize; + buffer_t buffer; + + DEBUGLOG(5, "ZSTDMT_tryGetInputRange"); + assert(mtctx->inBuff.buffer.start == NULL); + assert(mtctx->roundBuff.capacity >= target); + + if (spaceLeft < target) { + /* ZSTD_invalidateRepCodes() doesn't work for extDict variants. + * Simply copy the prefix to the beginning in that case. + */ + BYTE* const start = (BYTE*)mtctx->roundBuff.buffer; + size_t const prefixSize = mtctx->inBuff.prefix.size; + + buffer.start = start; + buffer.capacity = prefixSize; + if (ZSTDMT_isOverlapped(buffer, inUse)) { + DEBUGLOG(5, "Waiting for buffer..."); + return 0; + } + ZSTDMT_waitForLdmComplete(mtctx, buffer); + memmove(start, mtctx->inBuff.prefix.start, prefixSize); + mtctx->inBuff.prefix.start = start; + mtctx->roundBuff.pos = prefixSize; + } + buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos; + buffer.capacity = target; + + if (ZSTDMT_isOverlapped(buffer, inUse)) { + DEBUGLOG(5, "Waiting for buffer..."); + return 0; + } + assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix)); + + ZSTDMT_waitForLdmComplete(mtctx, buffer); + + DEBUGLOG(5, "Using prefix range [%zx, %zx)", + (size_t)mtctx->inBuff.prefix.start, + (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size); + DEBUGLOG(5, "Using source range [%zx, %zx)", + (size_t)buffer.start, + (size_t)buffer.start + buffer.capacity); + + + mtctx->inBuff.buffer = buffer; + mtctx->inBuff.filled = 0; + assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity); + return 1; +} + +typedef struct { + size_t toLoad; /* The number of bytes to load from the input. */ + int flush; /* Boolean declaring if we must flush because we found a synchronization point. 
*/ +} syncPoint_t; + +/** + * Searches through the input for a synchronization point. If one is found, we + * will instruct the caller to flush, and return the number of bytes to load. + * Otherwise, we will load as many bytes as possible and instruct the caller + * to continue as normal. + */ +static syncPoint_t +findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) +{ + BYTE const* const istart = (BYTE const*)input.src + input.pos; + U64 const primePower = mtctx->rsync.primePower; + U64 const hitMask = mtctx->rsync.hitMask; + + syncPoint_t syncPoint; + U64 hash; + BYTE const* prev; + size_t pos; + + syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled); + syncPoint.flush = 0; + if (!mtctx->params.rsyncable) + /* Rsync is disabled. */ + return syncPoint; + if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) + /* Not enough to compute the hash. + * We will miss any synchronization points in this RSYNC_LENGTH byte + * window. However, since it depends only in the internal buffers, if the + * state is already synchronized, we will remain synchronized. + * Additionally, the probability that we miss a synchronization point is + * low: RSYNC_LENGTH / targetSectionSize. + */ + return syncPoint; + /* Initialize the loop variables. */ + if (mtctx->inBuff.filled >= RSYNC_LENGTH) { + /* We have enough bytes buffered to initialize the hash. + * Start scanning at the beginning of the input. + */ + pos = 0; + prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); + } else { + /* We don't have enough bytes buffered to initialize the hash, but + * we know we have at least RSYNC_LENGTH bytes total. + * Start scanning after the first RSYNC_LENGTH bytes less the bytes + * already buffered. 
+ */ + pos = RSYNC_LENGTH - mtctx->inBuff.filled; + prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; + hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); + hash = ZSTD_rollingHash_append(hash, istart, pos); + } + /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll + * through the input. If we hit a synchronization point, then cut the + * job off, and tell the compressor to flush the job. Otherwise, load + * all the bytes and continue as normal. + * If we go too long without a synchronization point (targetSectionSize) + * then a block will be emitted anyways, but this is okay, since if we + * are already synchronized we will remain synchronized. + */ + for (; pos < syncPoint.toLoad; ++pos) { + BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; + /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ + hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); + if ((hash & hitMask) == hitMask) { + syncPoint.toLoad = pos + 1; + syncPoint.flush = 1; + break; + } + } + return syncPoint; +} + +size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx) +{ + size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled; + if (hintInSize==0) hintInSize = mtctx->targetSectionSize; + return hintInSize; +} + +/** ZSTDMT_compressStream_generic() : + * internal use only - exposed to be invoked from zstd_compress.c + * assumption : output and input are valid (pos <= size) + * @return : minimum amount of data remaining to flush, 0 if none */ +size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective endOp) +{ + unsigned forwardInputProgress = 0; + DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)", + (U32)endOp, (U32)(input->size - input->pos)); + assert(output->pos <= output->size); + assert(input->pos <= input->size); + + if 
(mtctx->singleBlockingThread) { /* delegate to single-thread (synchronous) */ + return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp); + } + + if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) { + /* current frame being ended. Only flush/end are allowed */ + return ERROR(stage_wrong); + } + + /* single-pass shortcut (note : synchronous-mode) */ + if ( (!mtctx->params.rsyncable) /* rsyncable mode is disabled */ + && (mtctx->nextJobID == 0) /* just started */ + && (mtctx->inBuff.filled == 0) /* nothing buffered */ + && (!mtctx->jobReady) /* no job already created */ + && (endOp == ZSTD_e_end) /* end order */ + && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */ + size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx, + (char*)output->dst + output->pos, output->size - output->pos, + (const char*)input->src + input->pos, input->size - input->pos, + mtctx->cdict, mtctx->params); + if (ZSTD_isError(cSize)) return cSize; + input->pos = input->size; + output->pos += cSize; + mtctx->allJobsCompleted = 1; + mtctx->frameEnded = 1; + return 0; + } + + /* fill input buffer */ + if ( (!mtctx->jobReady) + && (input->size > input->pos) ) { /* support NULL input */ + if (mtctx->inBuff.buffer.start == NULL) { + assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */ + if (!ZSTDMT_tryGetInputRange(mtctx)) { + /* It is only possible for this operation to fail if there are + * still compression jobs ongoing. 
+ */ + DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed"); + assert(mtctx->doneJobID != mtctx->nextJobID); + } else + DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start); + } + if (mtctx->inBuff.buffer.start != NULL) { + syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input); + if (syncPoint.flush && endOp == ZSTD_e_continue) { + endOp = ZSTD_e_flush; + } + assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize); + DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u", + (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize); + memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad); + input->pos += syncPoint.toLoad; + mtctx->inBuff.filled += syncPoint.toLoad; + forwardInputProgress = syncPoint.toLoad>0; + } + if ((input->pos < input->size) && (endOp == ZSTD_e_end)) + endOp = ZSTD_e_flush; /* can't end now : not all input consumed */ + } + + if ( (mtctx->jobReady) + || (mtctx->inBuff.filled >= mtctx->targetSectionSize) /* filled enough : let's compress */ + || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0)) /* something to flush : let's go */ + || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) { /* must finish the frame with a zero-size block */ + size_t const jobSize = mtctx->inBuff.filled; + assert(mtctx->inBuff.filled <= mtctx->targetSectionSize); + FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , ""); + } + + /* check for potential compressed data ready to be flushed */ + { size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp); /* block if there was no forward input progress */ + if (input->pos < input->size) return MAX(remainingToFlush, 1); /* input not consumed : do not end flush yet */ + DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = 
%u", (U32)remainingToFlush); + return remainingToFlush; + } +} + + +size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) , ""); + + /* recommended next input size : fill current input buffer */ + return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */ +} + + +static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame) +{ + size_t const srcSize = mtctx->inBuff.filled; + DEBUGLOG(5, "ZSTDMT_flushStream_internal"); + + if ( mtctx->jobReady /* one job ready for a worker to pick up */ + || (srcSize > 0) /* still some data within input buffer */ + || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) { /* need a last 0-size block to end frame */ + DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)", + (U32)srcSize, (U32)endFrame); + FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) , ""); + } + + /* check if there is any data available to flush */ + return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame); +} + + +size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) +{ + DEBUGLOG(5, "ZSTDMT_flushStream"); + if (mtctx->singleBlockingThread) + return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output); + return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush); +} + +size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output) +{ + DEBUGLOG(4, "ZSTDMT_endStream"); + if (mtctx->singleBlockingThread) + return ZSTD_endStream(mtctx->cctxPool->cctx[0], output); + return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end); +} diff --git a/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.h b/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.h new file mode 100644 index 
0000000000..89914eb7f8 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/compress/zstdmt_compress.h @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + #ifndef ZSTDMT_COMPRESS_H + #define ZSTDMT_COMPRESS_H + + #if defined (__cplusplus) + extern "C" { + #endif + + +/* Note : This is an internal API. + * These APIs used to be exposed with ZSTDLIB_API, + * because it used to be the only way to invoke MT compression. + * Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2() + * instead. + * + * If you depend on these APIs and can't switch, then define + * ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library. + * However, we may completely remove these functions in a future + * release, so please switch soon. + * + * This API requires ZSTD_MULTITHREAD to be defined during compilation, + * otherwise ZSTDMT_createCCtx*() will fail. + */ + +#ifdef ZSTD_LEGACY_MULTITHREADED_API +# define ZSTDMT_API ZSTDLIB_API +#else +# define ZSTDMT_API +#endif + +/* === Dependencies === */ +#include /* size_t */ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters */ +#include "../zstd.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTDLIB_API */ + + +/* === Constants === */ +#ifndef ZSTDMT_NBWORKERS_MAX +# define ZSTDMT_NBWORKERS_MAX 200 +#endif +#ifndef ZSTDMT_JOBSIZE_MIN +# define ZSTDMT_JOBSIZE_MIN (1 MB) +#endif +#define ZSTDMT_JOBLOG_MAX (MEM_32bits() ? 29 : 30) +#define ZSTDMT_JOBSIZE_MAX (MEM_32bits() ? (512 MB) : (1024 MB)) + + +/* === Memory management === */ +typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx; +/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. 
*/ +ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers); +/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */ +ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, + ZSTD_customMem cMem); +ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx); + +ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx); + + +/* === Simple one-pass compression function === */ + +ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + int compressionLevel); + + + +/* === Streaming functions === */ + +ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel); +ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */ + +ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); +ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input); + +ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ +ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */ + + +/* === Advanced functions and parameters === */ + +ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict, + ZSTD_parameters params, + int overlapLog); + +ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx, + const void* dict, size_t dictSize, /* dict can be released after init, a local copy is 
preserved within zcs */ + ZSTD_parameters params, + unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */ + +ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx, + const ZSTD_CDict* cdict, + ZSTD_frameParameters fparams, + unsigned long long pledgedSrcSize); /* note : zero means empty */ + +/* ZSTDMT_parameter : + * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */ +typedef enum { + ZSTDMT_p_jobSize, /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */ + ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */ + ZSTDMT_p_rsyncable /* Enables rsyncable mode. */ +} ZSTDMT_parameter; + +/* ZSTDMT_setMTCtxParameter() : + * allow setting individual parameters, one at a time, among a list of enums defined in ZSTDMT_parameter. + * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__ + * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions. + * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ +ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value); + +/* ZSTDMT_getMTCtxParameter() : + * Query the ZSTDMT_CCtx for a parameter value. + * @return : 0, or an error code (which can be tested using ZSTD_isError()) */ +ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value); + + +/*! ZSTDMT_compressStream_generic() : + * Combines ZSTDMT_compressStream() with optional ZSTDMT_flushStream() or ZSTDMT_endStream() + * depending on flush directive. 
+ * @return : minimum amount of data still to be flushed + * 0 if fully flushed + * or an error code + * note : needs to be init using any ZSTD_initCStream*() variant */ +ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx, + ZSTD_outBuffer* output, + ZSTD_inBuffer* input, + ZSTD_EndDirective endOp); + + +/* ======================================================== + * === Private interface, for use by ZSTD_compress.c === + * === Not exposed in libzstd. Never invoke directly === + * ======================================================== */ + + /*! ZSTDMT_toFlushNow() + * Tell how many bytes are ready to be flushed immediately. + * Probe the oldest active job (not yet entirely flushed) and check its output buffer. + * If return 0, it means there is no active job, + * or, it means oldest job is still active, but everything produced has been flushed so far, + * therefore flushing is limited by speed of oldest job. */ +size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx); + +/*! ZSTDMT_CCtxParam_setMTCtxParameter() + * like ZSTDMT_setMTCtxParameter(), but into a ZSTD_CCtx_Params */ +size_t ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params, ZSTDMT_parameter parameter, int value); + +/*! ZSTDMT_CCtxParam_setNbWorkers() + * Set nbWorkers, and clamp it. + * Also reset jobSize and overlapLog */ +size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers); + +/*! ZSTDMT_updateCParams_whileCompressing() : + * Updates only a selected set of compression parameters, to remain compatible with current frame. + * New parameters will be applied to next compression job. */ +void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams); + +/*! ZSTDMT_getFrameProgression(): + * tells how much data has been consumed (input) and produced (output) for current frame. + * able to count progression inside worker threads. + */ +ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx); + + +/*! 
ZSTDMT_initCStream_internal() : + * Private use only. Init streaming operation. + * expects params to be valid. + * must receive dict, or cdict, or none, but not both. + * @return : 0, or an error code */ +size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, + const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, + const ZSTD_CDict* cdict, + ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTDMT_COMPRESS_H */ diff --git a/src/borg/algorithms/zstd/lib/decompress/huf_decompress.c b/src/borg/algorithms/zstd/lib/decompress/huf_decompress.c new file mode 100644 index 0000000000..68293a1309 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/huf_decompress.c @@ -0,0 +1,1248 @@ +/* ****************************************************************** + * huff0 huffman decoder, + * part of Finite State Entropy library + * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+****************************************************************** */ + +/* ************************************************************** +* Dependencies +****************************************************************/ +#include /* memcpy, memset */ +#include "../common/compiler.h" +#include "../common/bitstream.h" /* BIT_* */ +#include "../common/fse.h" /* to compress headers */ +#define HUF_STATIC_LINKING_ONLY +#include "../common/huf.h" +#include "../common/error_private.h" + +/* ************************************************************** +* Macros +****************************************************************/ + +/* These two optional macros force the use one way or another of the two + * Huffman decompression implementations. You can't force in both directions + * at the same time. + */ +#if defined(HUF_FORCE_DECOMPRESS_X1) && \ + defined(HUF_FORCE_DECOMPRESS_X2) +#error "Cannot force the use of the X1 and X2 decoders at the same time!" +#endif + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_isError ERR_isError + + +/* ************************************************************** +* Byte alignment for workSpace management +****************************************************************/ +#define HUF_ALIGN(x, a) HUF_ALIGN_MASK((x), (a) - 1) +#define HUF_ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) + + +/* ************************************************************** +* BMI2 Variant Wrappers +****************************************************************/ +#if DYNAMIC_BMI2 + +#define HUF_DGEN(fn) \ + \ + static size_t fn##_default( \ + void* dst, size_t dstSize, \ + const void* cSrc, size_t cSrcSize, \ + const HUF_DTable* DTable) \ + { \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + \ + static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ + void* dst, size_t dstSize, \ + const void* cSrc, size_t 
cSrcSize, \ + const HUF_DTable* DTable) \ + { \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + \ + static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + { \ + if (bmi2) { \ + return fn##_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); \ + } \ + return fn##_default(dst, dstSize, cSrc, cSrcSize, DTable); \ + } + +#else + +#define HUF_DGEN(fn) \ + static size_t fn(void* dst, size_t dstSize, void const* cSrc, \ + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) \ + { \ + (void)bmi2; \ + return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ + } + +#endif + + +/*-***************************/ +/* generic DTableDesc */ +/*-***************************/ +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; + +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) +{ + DTableDesc dtd; + memcpy(&dtd, table, sizeof(dtd)); + return dtd; +} + + +#ifndef HUF_FORCE_DECOMPRESS_X2 + +/*-***************************/ +/* single-symbol decoding */ +/*-***************************/ +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ + +size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) +{ + U32 tableLog = 0; + U32 nbSymbols = 0; + size_t iSize; + void* const dtPtr = DTable + 1; + HUF_DEltX1* const dt = (HUF_DEltX1*)dtPtr; + + U32* rankVal; + BYTE* huffWeight; + size_t spaceUsed32 = 0; + + rankVal = (U32 *)workSpace + spaceUsed32; + spaceUsed32 += HUF_TABLELOG_ABSOLUTEMAX + 1; + huffWeight = (BYTE *)((U32 *)workSpace + spaceUsed32); + spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; + + if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); + + DEBUG_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); + /* memset(huffWeight, 0, sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* Table header */ + { DTableDesc dtd = HUF_getDTableDesc(DTable); + if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ + dtd.tableType = 0; + dtd.tableLog = (BYTE)tableLog; + memcpy(DTable, &dtd, sizeof(dtd)); + } + + /* Calculate starting value for each rank */ + { U32 n, nextRankStart = 0; + for (n=1; n> 1; + size_t const uStart = rankVal[w]; + size_t const uEnd = uStart + length; + size_t u; + HUF_DEltX1 D; + D.byte = (BYTE)n; + D.nbBits = (BYTE)(tableLog + 1 - w); + rankVal[w] = (U32)uEnd; + if (length < 4) { + /* Use length in the loop bound so the compiler knows it is short. */ + for (u = 0; u < length; ++u) + dt[uStart + u] = D; + } else { + /* Unroll the loop 4 times, we know it is a power of 2. */ + for (u = uStart; u < uEnd; u += 4) { + dt[u + 0] = D; + dt[u + 1] = D; + dt[u + 2] = D; + dt[u + 3] = D; + } } } } + return iSize; +} + +size_t HUF_readDTableX1(HUF_DTable* DTable, const void* src, size_t srcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_readDTableX1_wksp(DTable, src, srcSize, + workSpace, sizeof(workSpace)); +} + +FORCE_INLINE_TEMPLATE BYTE +HUF_decodeSymbolX1(BIT_DStream_t* Dstream, const HUF_DEltX1* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + BYTE const c = dt[val].byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; +} + +#define HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX1(DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX1_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) + +#define HUF_DECODE_SYMBOLX1_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX1_0(ptr, DStreamPtr) + +HINT_INLINE size_t +HUF_decodeStreamX1(BYTE* 
p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX1* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 4 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_1(p, bitDPtr); + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + } + + /* [0-3] symbols remaining */ + if (MEM_32bits()) + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd)) + HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + + /* no more data to retrieve from bitstream, no need to reload */ + while (p < pEnd) + HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + + return pEnd-pStart; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress1X1_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + dstSize; + const void* dtPtr = DTable + 1; + const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; + BIT_DStream_t bitD; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); + + HUF_decodeStreamX1(op, &bitD, oend, dt, dtLog); + + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + return dstSize; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress4X1_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + /* Check */ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + BYTE* const olimit = oend - 3; + const void* const dtPtr = DTable + 1; + const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + 
BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + U32 endSignal = 1; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); + CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); + CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); + CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); + + /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ + for ( ; (endSignal) & (op4 < olimit) ; ) { + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_1(op1, &bitD1); + HUF_DECODE_SYMBOLX1_1(op2, &bitD2); + HUF_DECODE_SYMBOLX1_1(op3, &bitD3); + HUF_DECODE_SYMBOLX1_1(op4, &bitD4); + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_0(op1, &bitD1); + HUF_DECODE_SYMBOLX1_0(op2, &bitD2); + HUF_DECODE_SYMBOLX1_0(op3, &bitD3); + HUF_DECODE_SYMBOLX1_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == 
BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + } + + /* check corruption */ + /* note : should not be necessary : op# advance in lock step, and we control op4. + * but curiously, binary generated by gcc 7.2 & 7.3 with -mbmi2 runs faster when >=1 test is present */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX1(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX1(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX1(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX1(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + + +typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, + const void *cSrc, + size_t cSrcSize, + const HUF_DTable *DTable); + +HUF_DGEN(HUF_decompress1X1_usingDTable_internal) +HUF_DGEN(HUF_decompress4X1_usingDTable_internal) + + + +size_t HUF_decompress1X1_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX1_wksp(DCtx, cSrc, cSrcSize, workSpace, 
wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); +} + + +size_t HUF_decompress1X1_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress1X1_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X1_DCtx (DTable, dst, dstSize, cSrc, cSrcSize); +} + +size_t HUF_decompress4X1_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +static size_t HUF_decompress4X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize, int bmi2) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX1_wksp (dctx, cSrc, cSrcSize, + workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); +} + +size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, 0); +} + + +size_t HUF_decompress4X1_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t 
cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} +size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX1(DTable, HUF_TABLELOG_MAX); + return HUF_decompress4X1_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + +#endif /* HUF_FORCE_DECOMPRESS_X2 */ + + +#ifndef HUF_FORCE_DECOMPRESS_X1 + +/* *************************/ +/* double-symbols decoding */ +/* *************************/ + +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; +typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; +typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; + + +/* HUF_fillDTableX2Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed, + const U32* rankValOrigin, const int minWeight, + const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, + U32 nbBitsBaseline, U16 baseSeq) +{ + HUF_DEltX2 DElt; + U32 rankVal[HUF_TABLELOG_MAX + 1]; + + /* get pre-calculated rankVal */ + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill skipped values */ + if (minWeight>1) { + U32 i, skipSize = rankVal[minWeight]; + MEM_writeLE16(&(DElt.sequence), baseSeq); + DElt.nbBits = (BYTE)(consumed); + DElt.length = 1; + for (i = 0; i < skipSize; i++) + DTable[i] = DElt; + } + + /* fill DTable */ + { U32 s; for (s=0; s= 1 */ + + rankVal[weight] += length; + } } +} + + +static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, + const sortedSymbol_t* sortedList, const U32 sortedListSize, + const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32 nbBitsBaseline) +{ + U32 rankVal[HUF_TABLELOG_MAX + 1]; + const int scaleLog = nbBitsBaseline - targetLog; /* 
note : targetLog >= srcLog, hence scaleLog <= 1 */ + const U32 minBits = nbBitsBaseline - maxWeight; + U32 s; + + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill DTable */ + for (s=0; s= minBits) { /* enough room for a second symbol */ + U32 sortedRank; + int minWeight = nbBits + scaleLog; + if (minWeight < 1) minWeight = 1; + sortedRank = rankStart[minWeight]; + HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits, + rankValOrigin[nbBits], minWeight, + sortedList+sortedRank, sortedListSize-sortedRank, + nbBitsBaseline, symbol); + } else { + HUF_DEltX2 DElt; + MEM_writeLE16(&(DElt.sequence), symbol); + DElt.nbBits = (BYTE)(nbBits); + DElt.length = 1; + { U32 const end = start + length; + U32 u; + for (u = start; u < end; u++) DTable[u] = DElt; + } } + rankVal[weight] += length; + } +} + +size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize) +{ + U32 tableLog, maxW, sizeOfSort, nbSymbols; + DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog; + size_t iSize; + void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ + HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; + U32 *rankStart; + + rankValCol_t* rankVal; + U32* rankStats; + U32* rankStart0; + sortedSymbol_t* sortedSymbol; + BYTE* weightList; + size_t spaceUsed32 = 0; + + rankVal = (rankValCol_t *)((U32 *)workSpace + spaceUsed32); + spaceUsed32 += (sizeof(rankValCol_t) * HUF_TABLELOG_MAX) >> 2; + rankStats = (U32 *)workSpace + spaceUsed32; + spaceUsed32 += HUF_TABLELOG_MAX + 1; + rankStart0 = (U32 *)workSpace + spaceUsed32; + spaceUsed32 += HUF_TABLELOG_MAX + 2; + sortedSymbol = (sortedSymbol_t *)workSpace + (spaceUsed32 * sizeof(U32)) / sizeof(sortedSymbol_t); + spaceUsed32 += HUF_ALIGN(sizeof(sortedSymbol_t) * (HUF_SYMBOLVALUE_MAX + 1), sizeof(U32)) >> 2; + weightList = (BYTE *)((U32 *)workSpace + spaceUsed32); + spaceUsed32 += HUF_ALIGN(HUF_SYMBOLVALUE_MAX + 1, sizeof(U32)) >> 2; + 
+ if ((spaceUsed32 << 2) > wkspSize) return ERROR(tableLog_tooLarge); + + rankStart = rankStart0 + 1; + memset(rankStats, 0, sizeof(U32) * (2 * HUF_TABLELOG_MAX + 2 + 1)); + + DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ + if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... */ + + iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + + /* find maxWeight */ + for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ + + /* Get start index of each weight */ + { U32 w, nextRankStart = 0; + for (w=1; w> consumed; + } } } } + + HUF_fillDTableX2(dt, maxTableLog, + sortedSymbol, sizeOfSort, + rankStart0, rankVal, maxW, + tableLog+1); + + dtd.tableLog = (BYTE)maxTableLog; + dtd.tableType = 1; + memcpy(DTable, &dtd, sizeof(dtd)); + return iSize; +} + +size_t HUF_readDTableX2(HUF_DTable* DTable, const void* src, size_t srcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_readDTableX2_wksp(DTable, src, srcSize, + workSpace, sizeof(workSpace)); +} + + +FORCE_INLINE_TEMPLATE U32 +HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +FORCE_INLINE_TEMPLATE U32 +HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) 
BIT_skipBits(DStream, dt[val].nbBits); + else { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */ + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); + } } + return 1; +} + +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX2(ptr, DStreamPtr, dt, dtLog) + +HINT_INLINE size_t +HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, + const HUF_DEltX2* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + + /* closer to end : up to 2 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress1X2_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BIT_DStream_t bitD; + + /* Init */ + CHECK_F( BIT_initDStream(&bitD, cSrc, cSrcSize) ); + + /* decode */ + { BYTE* const ostart = (BYTE*) dst; + BYTE* const 
oend = ostart + dstSize; + const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + HUF_decodeStreamX2(ostart, &bitD, oend, dt, dtd.tableLog); + } + + /* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; +} + +FORCE_INLINE_TEMPLATE size_t +HUF_decompress4X2_usingDTable_internal_body( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + BYTE* const olimit = oend - (sizeof(size_t)-1); + const void* const dtPtr = DTable+1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + size_t const segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal = 1; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + CHECK_F( 
BIT_initDStream(&bitD1, istart1, length1) ); + CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); + CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); + CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + for ( ; (endSignal) & (op4 < olimit); ) { +#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; +#else + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal = (U32)LIKELY( + (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) + & 
(BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); +#endif + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + +HUF_DGEN(HUF_decompress1X2_usingDTable_internal) +HUF_DGEN(HUF_decompress4X2_usingDTable_internal) + +size_t HUF_decompress1X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* DCtx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2_wksp(DCtx, cSrc, cSrcSize, + workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, DCtx, /* bmi2 */ 0); +} + + +size_t HUF_decompress1X2_DCtx(HUF_DTable* DCtx, void* dst, size_t dstSize, + 
const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress1X2_DCtx_wksp(DCtx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + +size_t HUF_decompress4X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +} + +static size_t HUF_decompress4X2_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize, int bmi2) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t hSize = HUF_readDTableX2_wksp(dctx, cSrc, cSrcSize, + workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); +} + +size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, /* bmi2 */ 0); +} + + +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + +size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX); + 
return HUF_decompress4X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize); +} + +#endif /* HUF_FORCE_DECOMPRESS_X1 */ + + +/* ***********************************/ +/* Universal decompression selectors */ +/* ***********************************/ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)dtd; + assert(dtd.tableType == 0); + return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)dtd; + assert(dtd.tableType == 1); + return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#else + return dtd.tableType ? HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : + HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#endif +} + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)dtd; + assert(dtd.tableType == 0); + return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)dtd; + assert(dtd.tableType == 1); + return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#else + return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0) : + HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, /* bmi2 */ 0); +#endif +} + + +#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +{ + /* single, double, quad */ + {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ + {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ + {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ + {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ + {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ + {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ + {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ + {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ + {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ + {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ + {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ + {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ + {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ + {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ + {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ + {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ +}; +#endif + +/** HUF_selectDecoder() : + * Tells which decoder is likely to decode faster, + * based on a set of pre-computed metrics. + * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 . 
+ * Assumption : 0 < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) +{ + assert(dstSize > 0); + assert(dstSize <= 128*1024); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)dstSize; + (void)cSrcSize; + return 0; +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)dstSize; + (void)cSrcSize; + return 1; +#else + /* decoder timing evaluation */ + { U32 const Q = (cSrcSize >= dstSize) ? 15 : (U32)(cSrcSize * 16 / dstSize); /* Q < 16 */ + U32 const D256 = (U32)(dstSize >> 8); + U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); + U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); + DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ + return DTime1 < DTime0; + } +#endif +} + + +typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); + +size_t HUF_decompress (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ +#if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) + static const decompressionAlgo decompress[2] = { HUF_decompress4X1, HUF_decompress4X2 }; +#endif + + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)algoNb; + assert(algoNb == 0); + return HUF_decompress4X1(dst, dstSize, cSrc, cSrcSize); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)algoNb; + assert(algoNb == 1); + return HUF_decompress4X2(dst, dstSize, cSrc, cSrcSize); +#else + return decompress[algoNb](dst, dstSize, cSrc, cSrcSize); +#endif + } +} + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, 
size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)algoNb; + assert(algoNb == 0); + return HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)algoNb; + assert(algoNb == 1); + return HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize); +#else + return algoNb ? HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X1_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; +#endif + } +} + +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress4X_hufOnly_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + + +size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, + size_t dstSize, const void* cSrc, + size_t cSrcSize, void* workSpace, + size_t wkspSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize == 0) return ERROR(corruption_detected); + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)algoNb; + assert(algoNb == 0); + return HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)algoNb; + assert(algoNb == 1); + return HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); +#else + return algoNb ? 
HUF_decompress4X2_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize): + HUF_decompress4X1_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize); +#endif + } +} + +size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + void* workSpace, size_t wkspSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)algoNb; + assert(algoNb == 0); + return HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)algoNb; + assert(algoNb == 1); + return HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize); +#else + return algoNb ? 
HUF_decompress1X2_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize): + HUF_decompress1X1_DCtx_wksp(dctx, dst, dstSize, cSrc, + cSrcSize, workSpace, wkspSize); +#endif + } +} + +size_t HUF_decompress1X_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize) +{ + U32 workSpace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; + return HUF_decompress1X_DCtx_wksp(dctx, dst, dstSize, cSrc, cSrcSize, + workSpace, sizeof(workSpace)); +} + + +size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)dtd; + assert(dtd.tableType == 0); + return HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)dtd; + assert(dtd.tableType == 1); + return HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +#else + return dtd.tableType ? 
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : + HUF_decompress1X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +#endif +} + +#ifndef HUF_FORCE_DECOMPRESS_X2 +size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX1_wksp(dctx, cSrc, cSrcSize, workSpace, wkspSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X1_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx, bmi2); +} +#endif + +size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)dtd; + assert(dtd.tableType == 0); + return HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)dtd; + assert(dtd.tableType == 1); + return HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +#else + return dtd.tableType ? 
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2) : + HUF_decompress4X1_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable, bmi2); +#endif +} + +size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize == 0) return ERROR(corruption_detected); + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); +#if defined(HUF_FORCE_DECOMPRESS_X1) + (void)algoNb; + assert(algoNb == 0); + return HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); +#elif defined(HUF_FORCE_DECOMPRESS_X2) + (void)algoNb; + assert(algoNb == 1); + return HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); +#else + return algoNb ? HUF_decompress4X2_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2) : + HUF_decompress4X1_DCtx_wksp_bmi2(dctx, dst, dstSize, cSrc, cSrcSize, workSpace, wkspSize, bmi2); +#endif + } +} diff --git a/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.c b/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.c new file mode 100644 index 0000000000..c8cb8ecc95 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.c @@ -0,0 +1,244 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/* zstd_ddict.c : + * concentrates all logic that needs to know the internals of ZSTD_DDict object */ + +/*-******************************************************* +* Dependencies +*********************************************************/ +#include /* memcpy, memmove, memset */ +#include "../common/cpu.h" /* bmi2 */ +#include "../common/mem.h" /* low level memory routines */ +#define FSE_STATIC_LINKING_ONLY +#include "../common/fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "../common/huf.h" +#include "zstd_decompress_internal.h" +#include "zstd_ddict.h" + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) +# include "../legacy/zstd_legacy.h" +#endif + + + +/*-******************************************************* +* Types +*********************************************************/ +struct ZSTD_DDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictSize; + ZSTD_entropyDTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; /* typedef'd to ZSTD_DDict within "zstd.h" */ + +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict) +{ + assert(ddict != NULL); + return ddict->dictContent; +} + +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict) +{ + assert(ddict != NULL); + return ddict->dictSize; +} + +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) +{ + DEBUGLOG(4, "ZSTD_copyDDictParameters"); + assert(dctx != NULL); + assert(ddict != NULL); + dctx->dictID = ddict->dictID; + dctx->prefixStart = ddict->dictContent; + dctx->virtualStart = ddict->dictContent; + dctx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; + dctx->previousDstEnd = dctx->dictEnd; +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + dctx->dictContentBeginForFuzzing = dctx->prefixStart; + dctx->dictContentEndForFuzzing = dctx->previousDstEnd; +#endif + if (ddict->entropyPresent) { + dctx->litEntropy = 1; + dctx->fseEntropy = 1; + dctx->LLTptr = ddict->entropy.LLTable; + dctx->MLTptr = 
ddict->entropy.MLTable; + dctx->OFTptr = ddict->entropy.OFTable; + dctx->HUFptr = ddict->entropy.hufTable; + dctx->entropy.rep[0] = ddict->entropy.rep[0]; + dctx->entropy.rep[1] = ddict->entropy.rep[1]; + dctx->entropy.rep[2] = ddict->entropy.rep[2]; + } else { + dctx->litEntropy = 0; + dctx->fseEntropy = 0; + } +} + + +static size_t +ZSTD_loadEntropy_intoDDict(ZSTD_DDict* ddict, + ZSTD_dictContentType_e dictContentType) +{ + ddict->dictID = 0; + ddict->entropyPresent = 0; + if (dictContentType == ZSTD_dct_rawContent) return 0; + + if (ddict->dictSize < 8) { + if (dictContentType == ZSTD_dct_fullDict) + return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ + return 0; /* pure content mode */ + } + { U32 const magic = MEM_readLE32(ddict->dictContent); + if (magic != ZSTD_MAGIC_DICTIONARY) { + if (dictContentType == ZSTD_dct_fullDict) + return ERROR(dictionary_corrupted); /* only accept specified dictionaries */ + return 0; /* pure content mode */ + } + } + ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_FRAMEIDSIZE); + + /* load entropy tables */ + RETURN_ERROR_IF(ZSTD_isError(ZSTD_loadDEntropy( + &ddict->entropy, ddict->dictContent, ddict->dictSize)), + dictionary_corrupted, ""); + ddict->entropyPresent = 1; + return 0; +} + + +static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) +{ + if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) { + ddict->dictBuffer = NULL; + ddict->dictContent = dict; + if (!dict) dictSize = 0; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem); + ddict->dictBuffer = internalBuffer; + ddict->dictContent = internalBuffer; + if (!internalBuffer) return ERROR(memory_allocation); + memcpy(internalBuffer, dict, dictSize); + } + ddict->dictSize = dictSize; + ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little 
and big endian */ + + /* parse dictionary content */ + FORWARD_IF_ERROR( ZSTD_loadEntropy_intoDDict(ddict, dictContentType) , ""); + + return 0; +} + +ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType, + ZSTD_customMem customMem) +{ + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + if (ddict == NULL) return NULL; + ddict->cMem = customMem; + { size_t const initResult = ZSTD_initDDict_internal(ddict, + dict, dictSize, + dictLoadMethod, dictContentType); + if (ZSTD_isError(initResult)) { + ZSTD_freeDDict(ddict); + return NULL; + } } + return ddict; + } +} + +/*! ZSTD_createDDict() : +* Create a digested dictionary, to start decompression without startup delay. +* `dict` content is copied inside DDict. +* Consequently, `dict` can be released after `ZSTD_DDict` creation */ +ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator); +} + +/*! ZSTD_createDDict_byReference() : + * Create a digested dictionary, to start decompression without startup delay. + * Dictionary content is simply referenced, it will be accessed during decompression. 
+ * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */ +ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize) +{ + ZSTD_customMem const allocator = { NULL, NULL, NULL }; + return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator); +} + + +const ZSTD_DDict* ZSTD_initStaticDDict( + void* sBuffer, size_t sBufferSize, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) +{ + size_t const neededSpace = sizeof(ZSTD_DDict) + + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); + ZSTD_DDict* const ddict = (ZSTD_DDict*)sBuffer; + assert(sBuffer != NULL); + assert(dict != NULL); + if ((size_t)sBuffer & 7) return NULL; /* 8-aligned */ + if (sBufferSize < neededSpace) return NULL; + if (dictLoadMethod == ZSTD_dlm_byCopy) { + memcpy(ddict+1, dict, dictSize); /* local copy */ + dict = ddict+1; + } + if (ZSTD_isError( ZSTD_initDDict_internal(ddict, + dict, dictSize, + ZSTD_dlm_byRef, dictContentType) )) + return NULL; + return ddict; +} + + +size_t ZSTD_freeDDict(ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = ddict->cMem; + ZSTD_free(ddict->dictBuffer, cMem); + ZSTD_free(ddict, cMem); + return 0; + } +} + +/*! ZSTD_estimateDDictSize() : + * Estimate amount of memory that will be needed to create a dictionary for decompression. + * Note : dictionary created by reference using ZSTD_dlm_byRef are smaller */ +size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod) +{ + return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize); +} + +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support sizeof on NULL */ + return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ; +} + +/*! ZSTD_getDictID_fromDDict() : + * Provides the dictID of the dictionary loaded into `ddict`. 
+ * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; + return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); +} diff --git a/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.h b/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.h new file mode 100644 index 0000000000..af307efd3d --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/zstd_ddict.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +#ifndef ZSTD_DDICT_H +#define ZSTD_DDICT_H + +/*-******************************************************* + * Dependencies + *********************************************************/ +#include /* size_t */ +#include "../zstd.h" /* ZSTD_DDict, and several public functions */ + + +/*-******************************************************* + * Interface + *********************************************************/ + +/* note: several prototypes are already published in `zstd.h` : + * ZSTD_createDDict() + * ZSTD_createDDict_byReference() + * ZSTD_createDDict_advanced() + * ZSTD_freeDDict() + * ZSTD_initStaticDDict() + * ZSTD_sizeof_DDict() + * ZSTD_estimateDDictSize() + * ZSTD_getDictID_fromDict() + */ + +const void* ZSTD_DDict_dictContent(const ZSTD_DDict* ddict); +size_t ZSTD_DDict_dictSize(const ZSTD_DDict* ddict); + +void ZSTD_copyDDictParameters(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); + + + +#endif /* ZSTD_DDICT_H */ diff --git 
a/src/borg/algorithms/zstd/lib/decompress/zstd_decompress.c b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress.c new file mode 100644 index 0000000000..be5c7cfc33 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress.c @@ -0,0 +1,1885 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/* *************************************************************** +* Tuning parameters +*****************************************************************/ +/*! + * HEAPMODE : + * Select how default decompression function ZSTD_decompress() allocates its context, + * on stack (0), or into heap (1, default; requires malloc()). + * Note that functions with explicit context such as ZSTD_decompressDCtx() are unaffected. + */ +#ifndef ZSTD_HEAPMODE +# define ZSTD_HEAPMODE 1 +#endif + +/*! +* LEGACY_SUPPORT : +* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+) +*/ +#ifndef ZSTD_LEGACY_SUPPORT +# define ZSTD_LEGACY_SUPPORT 0 +#endif + +/*! + * MAXWINDOWSIZE_DEFAULT : + * maximum window size accepted by DStream __by default__. + * Frames requiring more memory will be rejected. + * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize(). + */ +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT +# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) + 1) +#endif + +/*! + * NO_FORWARD_PROGRESS_MAX : + * maximum allowed nb of calls to ZSTD_decompressStream() + * without any forward progress + * (defined as: no byte read from input, and no byte flushed to output) + * before triggering an error. 
+ */ +#ifndef ZSTD_NO_FORWARD_PROGRESS_MAX +# define ZSTD_NO_FORWARD_PROGRESS_MAX 16 +#endif + + +/*-******************************************************* +* Dependencies +*********************************************************/ +#include /* memcpy, memmove, memset */ +#include "../common/cpu.h" /* bmi2 */ +#include "../common/mem.h" /* low level memory routines */ +#define FSE_STATIC_LINKING_ONLY +#include "../common/fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "../common/huf.h" +#include "../common/zstd_internal.h" /* blockProperties_t */ +#include "zstd_decompress_internal.h" /* ZSTD_DCtx */ +#include "zstd_ddict.h" /* ZSTD_DDictDictContent */ +#include "zstd_decompress_block.h" /* ZSTD_decompressBlock_internal */ + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) +# include "../legacy/zstd_legacy.h" +#endif + + +/*-************************************************************* +* Context management +***************************************************************/ +size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx) +{ + if (dctx==NULL) return 0; /* support sizeof NULL */ + return sizeof(*dctx) + + ZSTD_sizeof_DDict(dctx->ddictLocal) + + dctx->inBuffSize + dctx->outBuffSize; +} + +size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); } + + +static size_t ZSTD_startingInputLength(ZSTD_format_e format) +{ + size_t const startingInputLength = ZSTD_FRAMEHEADERSIZE_PREFIX(format); + /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */ + assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) ); + return startingInputLength; +} + +static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) +{ + dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */ + dctx->staticSize = 0; + dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + dctx->ddict = NULL; + dctx->ddictLocal = NULL; + dctx->dictEnd = NULL; + dctx->ddictIsCold = 0; + dctx->dictUses = ZSTD_dont_use; + 
dctx->inBuff = NULL; + dctx->inBuffSize = 0; + dctx->outBuffSize = 0; + dctx->streamStage = zdss_init; + dctx->legacyContext = NULL; + dctx->previousLegacyVersion = 0; + dctx->noForwardProgress = 0; + dctx->oversizedDuration = 0; + dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + dctx->outBufferMode = ZSTD_obm_buffered; +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + dctx->dictContentEndForFuzzing = NULL; +#endif +} + +ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) +{ + ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace; + + if ((size_t)workspace & 7) return NULL; /* 8-aligned */ + if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */ + + ZSTD_initDCtx_internal(dctx); + dctx->staticSize = workspaceSize; + dctx->inBuff = (char*)(dctx+1); + return dctx; +} + +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + if (!customMem.customAlloc ^ !customMem.customFree) return NULL; + + { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem); + if (!dctx) return NULL; + dctx->customMem = customMem; + ZSTD_initDCtx_internal(dctx); + return dctx; + } +} + +ZSTD_DCtx* ZSTD_createDCtx(void) +{ + DEBUGLOG(3, "ZSTD_createDCtx"); + return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); +} + +static void ZSTD_clearDict(ZSTD_DCtx* dctx) +{ + ZSTD_freeDDict(dctx->ddictLocal); + dctx->ddictLocal = NULL; + dctx->ddict = NULL; + dctx->dictUses = ZSTD_dont_use; +} + +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) +{ + if (dctx==NULL) return 0; /* support free on NULL */ + RETURN_ERROR_IF(dctx->staticSize, memory_allocation, "not compatible with static DCtx"); + { ZSTD_customMem const cMem = dctx->customMem; + ZSTD_clearDict(dctx); + ZSTD_free(dctx->inBuff, cMem); + dctx->inBuff = NULL; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (dctx->legacyContext) + ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion); +#endif + ZSTD_free(dctx, cMem); + return 0; + } +} + +/* no longer useful */ 
+void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) +{ + size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx); + memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */ +} + + +/*-************************************************************* + * Frame header decoding + ***************************************************************/ + +/*! ZSTD_isFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. + * Note 3 : Skippable Frame Identifiers are considered valid. */ +unsigned ZSTD_isFrame(const void* buffer, size_t size) +{ + if (size < ZSTD_FRAMEIDSIZE) return 0; + { U32 const magic = MEM_readLE32(buffer); + if (magic == ZSTD_MAGICNUMBER) return 1; + if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(buffer, size)) return 1; +#endif + return 0; +} + +/** ZSTD_frameHeaderSize_internal() : + * srcSize must be large enough to reach header size fields. + * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. 
+ * @return : size of the Frame Header + * or an error code, which can be tested with ZSTD_isError() */ +static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format) +{ + size_t const minInputSize = ZSTD_startingInputLength(format); + RETURN_ERROR_IF(srcSize < minInputSize, srcSize_wrong, ""); + + { BYTE const fhd = ((const BYTE*)src)[minInputSize-1]; + U32 const dictID= fhd & 3; + U32 const singleSegment = (fhd >> 5) & 1; + U32 const fcsId = fhd >> 6; + return minInputSize + !singleSegment + + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + + (singleSegment && !fcsId); + } +} + +/** ZSTD_frameHeaderSize() : + * srcSize must be >= ZSTD_frameHeaderSize_prefix. + * @return : size of the Frame Header, + * or an error code (if srcSize is too small) */ +size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +{ + return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1); +} + + +/** ZSTD_getFrameHeader_advanced() : + * decode Frame Header, or require larger `srcSize`. 
+ * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless + * @return : 0, `zfhPtr` is correctly filled, + * >0, `srcSize` is too small, value is wanted `srcSize` amount, + * or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format) +{ + const BYTE* ip = (const BYTE*)src; + size_t const minInputSize = ZSTD_startingInputLength(format); + + memset(zfhPtr, 0, sizeof(*zfhPtr)); /* not strictly necessary, but static analyzer do not understand that zfhPtr is only going to be read only if return value is zero, since they are 2 different signals */ + if (srcSize < minInputSize) return minInputSize; + RETURN_ERROR_IF(src==NULL, GENERIC, "invalid parameter"); + + if ( (format != ZSTD_f_zstd1_magicless) + && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) { + if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + /* skippable frame */ + if (srcSize < ZSTD_SKIPPABLEHEADERSIZE) + return ZSTD_SKIPPABLEHEADERSIZE; /* magic number + frame length */ + memset(zfhPtr, 0, sizeof(*zfhPtr)); + zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_FRAMEIDSIZE); + zfhPtr->frameType = ZSTD_skippableFrame; + return 0; + } + RETURN_ERROR(prefix_unknown, ""); + } + + /* ensure there is enough `srcSize` to fully read/decode frame header */ + { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format); + if (srcSize < fhsize) return fhsize; + zfhPtr->headerSize = (U32)fhsize; + } + + { BYTE const fhdByte = ip[minInputSize-1]; + size_t pos = minInputSize; + U32 const dictIDSizeCode = fhdByte&3; + U32 const checksumFlag = (fhdByte>>2)&1; + U32 const singleSegment = (fhdByte>>5)&1; + U32 const fcsID = fhdByte>>6; + U64 windowSize = 0; + U32 dictID = 0; + U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN; + RETURN_ERROR_IF((fhdByte & 0x08) != 0, frameParameter_unsupported, + "reserved bits, must be zero"); + + if 
/** ZSTD_getFrameHeader() :
 *  Convenience wrapper around ZSTD_getFrameHeader_advanced() for the
 *  standard ZSTD_f_zstd1 format. Does not consume input, only reads it.
 * @return : 0 and `zfhPtr` filled on success,
 *           >0 (the wanted `srcSize`) when input is too small,
 *           or an error code testable with ZSTD_isError() */
size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
{
    return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1);
}
invalid magic number, srcSize too small) */ +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) +{ +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) { + unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize); + return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret; + } +#endif + { ZSTD_frameHeader zfh; + if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0) + return ZSTD_CONTENTSIZE_ERROR; + if (zfh.frameType == ZSTD_skippableFrame) { + return 0; + } else { + return zfh.frameContentSize; + } } +} + +static size_t readSkippableFrameSize(void const* src, size_t srcSize) +{ + size_t const skippableHeaderSize = ZSTD_SKIPPABLEHEADERSIZE; + U32 sizeU32; + + RETURN_ERROR_IF(srcSize < ZSTD_SKIPPABLEHEADERSIZE, srcSize_wrong, ""); + + sizeU32 = MEM_readLE32((BYTE const*)src + ZSTD_FRAMEIDSIZE); + RETURN_ERROR_IF((U32)(sizeU32 + ZSTD_SKIPPABLEHEADERSIZE) < sizeU32, + frameParameter_unsupported, ""); + { + size_t const skippableSize = skippableHeaderSize + sizeU32; + RETURN_ERROR_IF(skippableSize > srcSize, srcSize_wrong, ""); + return skippableSize; + } +} + +/** ZSTD_findDecompressedSize() : + * compatible with legacy mode + * `srcSize` must be the exact length of some number of ZSTD compressed and/or + * skippable frames + * @return : decompressed size of the frames contained */ +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) +{ + unsigned long long totalDstSize = 0; + + while (srcSize >= ZSTD_startingInputLength(ZSTD_f_zstd1)) { + U32 const magicNumber = MEM_readLE32(src); + + if ((magicNumber & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t const skippableSize = readSkippableFrameSize(src, srcSize); + if (ZSTD_isError(skippableSize)) { + return ZSTD_CONTENTSIZE_ERROR; + } + assert(skippableSize <= srcSize); + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } + + { unsigned long long const ret = 
ZSTD_getFrameContentSize(src, srcSize); + if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + + /* check for overflow */ + if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; + totalDstSize += ret; + } + { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); + if (ZSTD_isError(frameSrcSize)) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + frameSrcSize; + srcSize -= frameSrcSize; + } + } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ + + if (srcSize) return ZSTD_CONTENTSIZE_ERROR; + + return totalDstSize; +} + +/** ZSTD_getDecompressedSize() : + * compatible with legacy mode + * @return : decompressed size if known, 0 otherwise + note : 0 can mean any of the following : + - frame content is empty + - decompressed size field is not present in frame header + - frame header unknown / not supported + - frame header not complete (`srcSize` too small) */ +unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize) +{ + unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); + ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN); + return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret; +} + + +/** ZSTD_decodeFrameHeader() : + * `headerSize` must be the size provided by ZSTD_frameHeaderSize(). + * @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) +{ + size_t const result = ZSTD_getFrameHeader_advanced(&(dctx->fParams), src, headerSize, dctx->format); + if (ZSTD_isError(result)) return result; /* invalid header */ + RETURN_ERROR_IF(result>0, srcSize_wrong, "headerSize too small"); +#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + /* Skip the dictID check in fuzzing mode, because it makes the search + * harder. 
+ */ + RETURN_ERROR_IF(dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID), + dictionary_wrong, ""); +#endif + if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); + return 0; +} + +static ZSTD_frameSizeInfo ZSTD_errorFrameSizeInfo(size_t ret) +{ + ZSTD_frameSizeInfo frameSizeInfo; + frameSizeInfo.compressedSize = ret; + frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; + return frameSizeInfo; +} + +static ZSTD_frameSizeInfo ZSTD_findFrameSizeInfo(const void* src, size_t srcSize) +{ + ZSTD_frameSizeInfo frameSizeInfo; + memset(&frameSizeInfo, 0, sizeof(ZSTD_frameSizeInfo)); + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) + return ZSTD_findFrameSizeInfoLegacy(src, srcSize); +#endif + + if ((srcSize >= ZSTD_SKIPPABLEHEADERSIZE) + && (MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + frameSizeInfo.compressedSize = readSkippableFrameSize(src, srcSize); + assert(ZSTD_isError(frameSizeInfo.compressedSize) || + frameSizeInfo.compressedSize <= srcSize); + return frameSizeInfo; + } else { + const BYTE* ip = (const BYTE*)src; + const BYTE* const ipstart = ip; + size_t remainingSize = srcSize; + size_t nbBlocks = 0; + ZSTD_frameHeader zfh; + + /* Extract Frame Header */ + { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ZSTD_isError(ret)) + return ZSTD_errorFrameSizeInfo(ret); + if (ret > 0) + return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); + } + + ip += zfh.headerSize; + remainingSize -= zfh.headerSize; + + /* Iterate over each block */ + while (1) { + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) + return ZSTD_errorFrameSizeInfo(cBlockSize); + + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) + return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); + + ip += ZSTD_blockHeaderSize + cBlockSize; + remainingSize -= 
ZSTD_blockHeaderSize + cBlockSize; + nbBlocks++; + + if (blockProperties.lastBlock) break; + } + + /* Final frame content checksum */ + if (zfh.checksumFlag) { + if (remainingSize < 4) + return ZSTD_errorFrameSizeInfo(ERROR(srcSize_wrong)); + ip += 4; + } + + frameSizeInfo.compressedSize = ip - ipstart; + frameSizeInfo.decompressedBound = (zfh.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) + ? zfh.frameContentSize + : nbBlocks * zfh.blockSizeMax; + return frameSizeInfo; + } +} + +/** ZSTD_findFrameCompressedSize() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the compressed size of the frame starting at `src` */ +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) +{ + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); + return frameSizeInfo.compressedSize; +} + +/** ZSTD_decompressBound() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame or a skippeable frame + * `srcSize` must be at least as large as the frame contained + * @return : the maximum decompressed size of the compressed source + */ +unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize) +{ + unsigned long long bound = 0; + /* Iterate over each frame */ + while (srcSize > 0) { + ZSTD_frameSizeInfo const frameSizeInfo = ZSTD_findFrameSizeInfo(src, srcSize); + size_t const compressedSize = frameSizeInfo.compressedSize; + unsigned long long const decompressedBound = frameSizeInfo.decompressedBound; + if (ZSTD_isError(compressedSize) || decompressedBound == ZSTD_CONTENTSIZE_ERROR) + return ZSTD_CONTENTSIZE_ERROR; + assert(srcSize >= compressedSize); + src = (const BYTE*)src + compressedSize; + srcSize -= compressedSize; + bound += decompressedBound; + } + return bound; +} + + +/*-************************************************************* + * Frame decoding + 
***************************************************************/ + +/** ZSTD_insertBlock() : + * insert `src` block into `dctx` history. Useful to track uncompressed blocks. */ +size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) +{ + DEBUGLOG(5, "ZSTD_insertBlock: %u bytes", (unsigned)blockSize); + ZSTD_checkContinuity(dctx, blockStart); + dctx->previousDstEnd = (const char*)blockStart + blockSize; + return blockSize; +} + + +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_copyRawBlock"); + if (dst == NULL) { + if (srcSize == 0) return 0; + RETURN_ERROR(dstBuffer_null, ""); + } + RETURN_ERROR_IF(srcSize > dstCapacity, dstSize_tooSmall, ""); + memcpy(dst, src, srcSize); + return srcSize; +} + +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, + BYTE b, + size_t regenSize) +{ + if (dst == NULL) { + if (regenSize == 0) return 0; + RETURN_ERROR(dstBuffer_null, ""); + } + RETURN_ERROR_IF(regenSize > dstCapacity, dstSize_tooSmall, ""); + memset(dst, b, regenSize); + return regenSize; +} + + +/*! ZSTD_decompressFrame() : + * @dctx must be properly initialized + * will update *srcPtr and *srcSizePtr, + * to make *srcPtr progress by one frame. */ +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void** srcPtr, size_t *srcSizePtr) +{ + const BYTE* ip = (const BYTE*)(*srcPtr); + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = dstCapacity != 0 ? 
ostart + dstCapacity : ostart; + BYTE* op = ostart; + size_t remainingSrcSize = *srcSizePtr; + + DEBUGLOG(4, "ZSTD_decompressFrame (srcSize:%i)", (int)*srcSizePtr); + + /* check */ + RETURN_ERROR_IF( + remainingSrcSize < ZSTD_FRAMEHEADERSIZE_MIN(dctx->format)+ZSTD_blockHeaderSize, + srcSize_wrong, ""); + + /* Frame Header */ + { size_t const frameHeaderSize = ZSTD_frameHeaderSize_internal( + ip, ZSTD_FRAMEHEADERSIZE_PREFIX(dctx->format), dctx->format); + if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; + RETURN_ERROR_IF(remainingSrcSize < frameHeaderSize+ZSTD_blockHeaderSize, + srcSize_wrong, ""); + FORWARD_IF_ERROR( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) , ""); + ip += frameHeaderSize; remainingSrcSize -= frameHeaderSize; + } + + /* Loop on each block */ + while (1) { + size_t decodedSize; + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + ip += ZSTD_blockHeaderSize; + remainingSrcSize -= ZSTD_blockHeaderSize; + RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); + + switch(blockProperties.blockType) + { + case bt_compressed: + decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1); + break; + case bt_raw : + decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); + break; + case bt_rle : + decodedSize = ZSTD_setRleBlock(op, oend-op, *ip, blockProperties.origSize); + break; + case bt_reserved : + default: + RETURN_ERROR(corruption_detected, "invalid block type"); + } + + if (ZSTD_isError(decodedSize)) return decodedSize; + if (dctx->fParams.checksumFlag) + XXH64_update(&dctx->xxhState, op, decodedSize); + if (decodedSize != 0) + op += decodedSize; + assert(ip != NULL); + ip += cBlockSize; + remainingSrcSize -= cBlockSize; + if (blockProperties.lastBlock) break; + } + + if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) { + 
RETURN_ERROR_IF((U64)(op-ostart) != dctx->fParams.frameContentSize, + corruption_detected, ""); + } + if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ + U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); + U32 checkRead; + RETURN_ERROR_IF(remainingSrcSize<4, checksum_wrong, ""); + checkRead = MEM_readLE32(ip); + RETURN_ERROR_IF(checkRead != checkCalc, checksum_wrong, ""); + ip += 4; + remainingSrcSize -= 4; + } + + /* Allow caller to get size read */ + *srcPtr = ip; + *srcSizePtr = remainingSrcSize; + return op-ostart; +} + +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize, + const ZSTD_DDict* ddict) +{ + void* const dststart = dst; + int moreThan1Frame = 0; + + DEBUGLOG(5, "ZSTD_decompressMultiFrame"); + assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */ + + if (ddict) { + dict = ZSTD_DDict_dictContent(ddict); + dictSize = ZSTD_DDict_dictSize(ddict); + } + + while (srcSize >= ZSTD_startingInputLength(dctx->format)) { + +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1) + if (ZSTD_isLegacy(src, srcSize)) { + size_t decodedSize; + size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize); + if (ZSTD_isError(frameSize)) return frameSize; + RETURN_ERROR_IF(dctx->staticSize, memory_allocation, + "legacy support is not compatible with static dctx"); + + decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + + assert(decodedSize <=- dstCapacity); + dst = (BYTE*)dst + decodedSize; + dstCapacity -= decodedSize; + + src = (const BYTE*)src + frameSize; + srcSize -= frameSize; + + continue; + } +#endif + + { U32 const magicNumber = MEM_readLE32(src); + DEBUGLOG(4, "reading magic number %08X (expecting %08X)", + (unsigned)magicNumber, ZSTD_MAGICNUMBER); + if ((magicNumber & 
ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t const skippableSize = readSkippableFrameSize(src, srcSize); + FORWARD_IF_ERROR(skippableSize, "readSkippableFrameSize failed"); + assert(skippableSize <= srcSize); + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } } + + if (ddict) { + /* we were called from ZSTD_decompress_usingDDict */ + FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(dctx, ddict), ""); + } else { + /* this will initialize correctly with no dict if dict == NULL, so + * use this in all cases but ddict */ + FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize), ""); + } + ZSTD_checkContinuity(dctx, dst); + + { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, + &src, &srcSize); + RETURN_ERROR_IF( + (ZSTD_getErrorCode(res) == ZSTD_error_prefix_unknown) + && (moreThan1Frame==1), + srcSize_wrong, + "at least one frame successfully completed, but following " + "bytes are garbage: it's more likely to be a srcSize error, " + "specifying more bytes than compressed size of frame(s). This " + "error message replaces ERROR(prefix_unknown), which would be " + "confusing, as the first header is actually correct. Note that " + "one could be unlucky, it might be a corruption error instead, " + "happening right at the place where we expect zstd magic " + "bytes. 
But this is _much_ less likely than a srcSize field " + "error."); + if (ZSTD_isError(res)) return res; + assert(res <= dstCapacity); + if (res != 0) + dst = (BYTE*)dst + res; + dstCapacity -= res; + } + moreThan1Frame = 1; + } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */ + + RETURN_ERROR_IF(srcSize, srcSize_wrong, "input not entirely consumed"); + + return (BYTE*)dst - (BYTE*)dststart; +} + +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict, size_t dictSize) +{ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); +} + + +static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) +{ + switch (dctx->dictUses) { + default: + assert(0 /* Impossible */); + /* fall-through */ + case ZSTD_dont_use: + ZSTD_clearDict(dctx); + return NULL; + case ZSTD_use_indefinitely: + return dctx->ddict; + case ZSTD_use_once: + dctx->dictUses = ZSTD_dont_use; + return dctx->ddict; + } +} + +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_decompress_usingDDict(dctx, dst, dstCapacity, src, srcSize, ZSTD_getDDict(dctx)); +} + + +size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ +#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) + size_t regenSize; + ZSTD_DCtx* const dctx = ZSTD_createDCtx(); + RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); + regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); + ZSTD_freeDCtx(dctx); + return regenSize; +#else /* stack mode */ + ZSTD_DCtx dctx; + ZSTD_initDCtx_internal(&dctx); + return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize); +#endif +} + + +/*-************************************** +* Advanced Streaming Decompression API +* Bufferless and synchronous +****************************************/ +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return 
dctx->expected; } + +/** + * Similar to ZSTD_nextSrcSizeToDecompress(), but when a block input can be streamed, + * we allow taking a partial block as the input. Currently only raw uncompressed blocks can + * be streamed. + * + * For blocks that can be streamed, this allows us to reduce the latency until we produce + * output, and avoid copying the input. + * + * @param inputSize - The total amount of input that the caller currently has. + */ +static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t inputSize) { + if (!(dctx->stage == ZSTDds_decompressBlock || dctx->stage == ZSTDds_decompressLastBlock)) + return dctx->expected; + if (dctx->bType != bt_raw) + return dctx->expected; + return MIN(MAX(inputSize, 1), dctx->expected); +} + +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { + switch(dctx->stage) + { + default: /* should not happen */ + assert(0); + case ZSTDds_getFrameHeaderSize: + case ZSTDds_decodeFrameHeader: + return ZSTDnit_frameHeader; + case ZSTDds_decodeBlockHeader: + return ZSTDnit_blockHeader; + case ZSTDds_decompressBlock: + return ZSTDnit_block; + case ZSTDds_decompressLastBlock: + return ZSTDnit_lastBlock; + case ZSTDds_checkChecksum: + return ZSTDnit_checksum; + case ZSTDds_decodeSkippableHeader: + case ZSTDds_skipFrame: + return ZSTDnit_skippableFrame; + } +} + +static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } + +/** ZSTD_decompressContinue() : + * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress()) + * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`) + * or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (unsigned)srcSize); + /* Sanity check */ + RETURN_ERROR_IF(srcSize != ZSTD_nextSrcSizeToDecompressWithInputSize(dctx, srcSize),
srcSize_wrong, "not allowed"); + if (dstCapacity) ZSTD_checkContinuity(dctx, dst); + + switch (dctx->stage) + { + case ZSTDds_getFrameHeaderSize : + assert(src != NULL); + if (dctx->format == ZSTD_f_zstd1) { /* allows header */ + assert(srcSize >= ZSTD_FRAMEIDSIZE); /* to read skippable magic number */ + if ((MEM_readLE32(src) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + memcpy(dctx->headerBuffer, src, srcSize); + dctx->expected = ZSTD_SKIPPABLEHEADERSIZE - srcSize; /* remaining to load to get full skippable frame header */ + dctx->stage = ZSTDds_decodeSkippableHeader; + return 0; + } } + dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format); + if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; + memcpy(dctx->headerBuffer, src, srcSize); + dctx->expected = dctx->headerSize - srcSize; + dctx->stage = ZSTDds_decodeFrameHeader; + return 0; + + case ZSTDds_decodeFrameHeader: + assert(src != NULL); + memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize); + FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize), ""); + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTDds_decodeBlockHeader; + return 0; + + case ZSTDds_decodeBlockHeader: + { blockProperties_t bp; + size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + RETURN_ERROR_IF(cBlockSize > dctx->fParams.blockSizeMax, corruption_detected, "Block Size Exceeds Maximum"); + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize) { + dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; + return 0; + } + /* empty block */ + if (bp.lastBlock) { + if (dctx->fParams.checksumFlag) { + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* end of frame */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */ + dctx->stage = ZSTDds_decodeBlockHeader; + } + return 0; + } + + case ZSTDds_decompressLastBlock: + case ZSTDds_decompressBlock: + DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock"); + { size_t rSize; + switch(dctx->bType) + { + case bt_compressed: + DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); + dctx->expected = 0; /* Streaming not supported */ + break; + case bt_raw : + assert(srcSize <= dctx->expected); + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + FORWARD_IF_ERROR(rSize, "ZSTD_copyRawBlock failed"); + assert(rSize == srcSize); + dctx->expected -= rSize; + break; + case bt_rle : + rSize = ZSTD_setRleBlock(dst, dstCapacity, *(const BYTE*)src, dctx->rleSize); + dctx->expected = 0; /* Streaming not supported */ + break; + case bt_reserved : /* should never happen */ + default: + RETURN_ERROR(corruption_detected, "invalid block type"); + } + FORWARD_IF_ERROR(rSize, ""); + RETURN_ERROR_IF(rSize > dctx->fParams.blockSizeMax, corruption_detected, "Decompressed Block Size Exceeds Maximum"); + DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (unsigned)rSize); + dctx->decodedSize += rSize; + if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); + dctx->previousDstEnd = (char*)dst + rSize; + + /* Stay on the same stage until we are finished streaming the block. 
*/ + if (dctx->expected > 0) { + return rSize; + } + + if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ + DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (unsigned)dctx->decodedSize); + RETURN_ERROR_IF( + dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN + && dctx->decodedSize != dctx->fParams.frameContentSize, + corruption_detected, ""); + if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* ends here */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->stage = ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; + } + return rSize; + } + + case ZSTDds_checkChecksum: + assert(srcSize == 4); /* guaranteed by dctx->expected */ + { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); + U32 const check32 = MEM_readLE32(src); + DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", (unsigned)h32, (unsigned)check32); + RETURN_ERROR_IF(check32 != h32, checksum_wrong, ""); + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } + + case ZSTDds_decodeSkippableHeader: + assert(src != NULL); + assert(srcSize <= ZSTD_SKIPPABLEHEADERSIZE); + memcpy(dctx->headerBuffer + (ZSTD_SKIPPABLEHEADERSIZE - srcSize), src, srcSize); /* complete skippable header */ + dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_FRAMEIDSIZE); /* note : dctx->expected can grow seriously large, beyond local buffer size */ + dctx->stage = ZSTDds_skipFrame; + return 0; + + case ZSTDds_skipFrame: + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + + default: + assert(0); /* impossible */ + RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ + } +} + + +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + dctx->dictEnd = dctx->previousDstEnd; + 
dctx->virtualStart = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); + dctx->prefixStart = dict; + dctx->previousDstEnd = (const char*)dict + dictSize; +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + dctx->dictContentBeginForFuzzing = dctx->prefixStart; + dctx->dictContentEndForFuzzing = dctx->previousDstEnd; +#endif + return 0; +} + +/*! ZSTD_loadDEntropy() : + * dict : must point at beginning of a valid zstd dictionary. + * @return : size of entropy tables read */ +size_t +ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, + const void* const dict, size_t const dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + + RETURN_ERROR_IF(dictSize <= 8, dictionary_corrupted, "dict is too small"); + assert(MEM_readLE32(dict) == ZSTD_MAGIC_DICTIONARY); /* dict must be valid */ + dictPtr += 8; /* skip header = magic + dictID */ + + ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, OFTable) == offsetof(ZSTD_entropyDTables_t, LLTable) + sizeof(entropy->LLTable)); + ZSTD_STATIC_ASSERT(offsetof(ZSTD_entropyDTables_t, MLTable) == offsetof(ZSTD_entropyDTables_t, OFTable) + sizeof(entropy->OFTable)); + ZSTD_STATIC_ASSERT(sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable) >= HUF_DECOMPRESS_WORKSPACE_SIZE); + { void* const workspace = &entropy->LLTable; /* use fse tables as temporary workspace; implies fse tables are grouped together */ + size_t const workspaceSize = sizeof(entropy->LLTable) + sizeof(entropy->OFTable) + sizeof(entropy->MLTable); +#ifdef HUF_FORCE_DECOMPRESS_X1 + /* in minimal huffman, we always use X1 variants */ + size_t const hSize = HUF_readDTableX1_wksp(entropy->hufTable, + dictPtr, dictEnd - dictPtr, + workspace, workspaceSize); +#else + size_t const hSize = HUF_readDTableX2_wksp(entropy->hufTable, + dictPtr, dictEnd - dictPtr, + workspace, workspaceSize); +#endif + RETURN_ERROR_IF(HUF_isError(hSize), dictionary_corrupted, ""); + 
dictPtr += hSize; + } + + { short offcodeNCount[MaxOff+1]; + unsigned offcodeMaxValue = MaxOff, offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeMaxValue > MaxOff, dictionary_corrupted, ""); + RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, ""); + ZSTD_buildFSETable( entropy->OFTable, + offcodeNCount, offcodeMaxValue, + OF_base, OF_bits, + offcodeLog); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthMaxValue > MaxML, dictionary_corrupted, ""); + RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, ""); + ZSTD_buildFSETable( entropy->MLTable, + matchlengthNCount, matchlengthMaxValue, + ML_base, ML_bits, + matchlengthLog); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, ""); + RETURN_ERROR_IF(litlengthMaxValue > MaxLL, dictionary_corrupted, ""); + RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, ""); + ZSTD_buildFSETable( entropy->LLTable, + litlengthNCount, litlengthMaxValue, + LL_base, LL_bits, + litlengthLog); + dictPtr += litlengthHeaderSize; + } + + RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, ""); + { int i; + size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); + for (i=0; i<3; i++) { + U32 const rep = 
MEM_readLE32(dictPtr); dictPtr += 4; + RETURN_ERROR_IF(rep==0 || rep > dictContentSize, + dictionary_corrupted, ""); + entropy->rep[i] = rep; + } } + + return dictPtr - (const BYTE*)dict; +} + +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); + { U32 const magic = MEM_readLE32(dict); + if (magic != ZSTD_MAGIC_DICTIONARY) { + return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ + } } + dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); + + /* load entropy tables */ + { size_t const eSize = ZSTD_loadDEntropy(&dctx->entropy, dict, dictSize); + RETURN_ERROR_IF(ZSTD_isError(eSize), dictionary_corrupted, ""); + dict = (const char*)dict + eSize; + dictSize -= eSize; + } + dctx->litEntropy = dctx->fseEntropy = 1; + + /* reference dictionary content */ + return ZSTD_refDictContent(dctx, dict, dictSize); +} + +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) +{ + assert(dctx != NULL); + dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */ + dctx->stage = ZSTDds_getFrameHeaderSize; + dctx->decodedSize = 0; + dctx->previousDstEnd = NULL; + dctx->prefixStart = NULL; + dctx->virtualStart = NULL; + dctx->dictEnd = NULL; + dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + dctx->bType = bt_reserved; + ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); + memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ + dctx->LLTptr = dctx->entropy.LLTable; + dctx->MLTptr = dctx->entropy.MLTable; + dctx->OFTptr = dctx->entropy.OFTable; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; +} + +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + FORWARD_IF_ERROR( 
ZSTD_decompressBegin(dctx) , ""); + if (dict && dictSize) + RETURN_ERROR_IF( + ZSTD_isError(ZSTD_decompress_insertDictionary(dctx, dict, dictSize)), + dictionary_corrupted, ""); + return 0; +} + + +/* ====== ZSTD_DDict ====== */ + +size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) +{ + DEBUGLOG(4, "ZSTD_decompressBegin_usingDDict"); + assert(dctx != NULL); + if (ddict) { + const char* const dictStart = (const char*)ZSTD_DDict_dictContent(ddict); + size_t const dictSize = ZSTD_DDict_dictSize(ddict); + const void* const dictEnd = dictStart + dictSize; + dctx->ddictIsCold = (dctx->dictEnd != dictEnd); + DEBUGLOG(4, "DDict is %s", + dctx->ddictIsCold ? "~cold~" : "hot!"); + } + FORWARD_IF_ERROR( ZSTD_decompressBegin(dctx) , ""); + if (ddict) { /* NULL ddict is equivalent to no dictionary */ + ZSTD_copyDDictParameters(dctx, ddict); + } + return 0; +} + +/*! ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. + * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) +{ + if (dictSize < 8) return 0; + if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0; + return MEM_readLE32((const char*)dict + ZSTD_FRAMEIDSIZE); +} + +/*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompress frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could for one of the following reasons : + * - The frame does not require a dictionary (most common case). + * - The frame was built with dictID intentionally removed. + * Needed dictionary is a hidden information. + * Note : this use case also happens when using a non-conformant dictionary. + * - `srcSize` is too small, and as a result, frame header could not be decoded. + * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`. + * - This is not a Zstandard frame. 
+ * When identifying the exact failure cause, it's possible to use + * ZSTD_getFrameHeader(), which will provide a more precise error code. */ +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) +{ + ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 }; + size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize); + if (ZSTD_isError(hError)) return 0; + return zfp.dictID; +} + + +/*! ZSTD_decompress_usingDDict() : +* Decompression using a pre-digested Dictionary +* Use dictionary without significant overhead. */ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_DDict* ddict) +{ + /* pass content and size in case legacy frames are encountered */ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, + NULL, 0, + ddict); +} + + +/*===================================== +* Streaming decompression +*====================================*/ + +ZSTD_DStream* ZSTD_createDStream(void) +{ + DEBUGLOG(3, "ZSTD_createDStream"); + return ZSTD_createDStream_advanced(ZSTD_defaultCMem); +} + +ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) +{ + return ZSTD_initStaticDCtx(workspace, workspaceSize); +} + +ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createDCtx_advanced(customMem); +} + +size_t ZSTD_freeDStream(ZSTD_DStream* zds) +{ + return ZSTD_freeDCtx(zds); +} + + +/* *** Initialization *** */ + +size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; } +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; } + +size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, + const void* dict, size_t dictSize, + ZSTD_dictLoadMethod_e dictLoadMethod, + ZSTD_dictContentType_e dictContentType) +{ + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); + ZSTD_clearDict(dctx); + if (dict && dictSize != 0) { + dctx->ddictLocal = 
ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem); + RETURN_ERROR_IF(dctx->ddictLocal == NULL, memory_allocation, "NULL pointer!"); + dctx->ddict = dctx->ddictLocal; + dctx->dictUses = ZSTD_use_indefinitely; + } + return 0; +} + +size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); +} + +size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); +} + +size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType) +{ + FORWARD_IF_ERROR(ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType), ""); + dctx->dictUses = ZSTD_use_once; + return 0; +} + +size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize) +{ + return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent); +} + + +/* ZSTD_initDStream_usingDict() : + * return : expected size, aka ZSTD_startingInputLength(). 
+ * this function cannot fail */ +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize) +{ + DEBUGLOG(4, "ZSTD_initDStream_usingDict"); + FORWARD_IF_ERROR( ZSTD_DCtx_reset(zds, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) , ""); + return ZSTD_startingInputLength(zds->format); +} + +/* note : this variant can't fail */ +size_t ZSTD_initDStream(ZSTD_DStream* zds) +{ + DEBUGLOG(4, "ZSTD_initDStream"); + return ZSTD_initDStream_usingDDict(zds, NULL); +} + +/* ZSTD_initDStream_usingDDict() : + * ddict will just be referenced, and must outlive decompression session + * this function cannot fail */ +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict) +{ + FORWARD_IF_ERROR( ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only) , ""); + FORWARD_IF_ERROR( ZSTD_DCtx_refDDict(dctx, ddict) , ""); + return ZSTD_startingInputLength(dctx->format); +} + +/* ZSTD_resetDStream() : + * return : expected size, aka ZSTD_startingInputLength(). 
+ * this function cannot fail */ +size_t ZSTD_resetDStream(ZSTD_DStream* dctx) +{ + FORWARD_IF_ERROR(ZSTD_DCtx_reset(dctx, ZSTD_reset_session_only), ""); + return ZSTD_startingInputLength(dctx->format); +} + + +size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict) +{ + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); + ZSTD_clearDict(dctx); + if (ddict) { + dctx->ddict = ddict; + dctx->dictUses = ZSTD_use_indefinitely; + } + return 0; +} + +/* ZSTD_DCtx_setMaxWindowSize() : + * note : no direct equivalence in ZSTD_DCtx_setParameter, + * since this version sets windowSize, and the other sets windowLog */ +size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize) +{ + ZSTD_bounds const bounds = ZSTD_dParam_getBounds(ZSTD_d_windowLogMax); + size_t const min = (size_t)1 << bounds.lowerBound; + size_t const max = (size_t)1 << bounds.upperBound; + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); + RETURN_ERROR_IF(maxWindowSize < min, parameter_outOfBound, ""); + RETURN_ERROR_IF(maxWindowSize > max, parameter_outOfBound, ""); + dctx->maxWindowSize = maxWindowSize; + return 0; +} + +size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format) +{ + return ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, format); +} + +ZSTD_bounds ZSTD_dParam_getBounds(ZSTD_dParameter dParam) +{ + ZSTD_bounds bounds = { 0, 0, 0 }; + switch(dParam) { + case ZSTD_d_windowLogMax: + bounds.lowerBound = ZSTD_WINDOWLOG_ABSOLUTEMIN; + bounds.upperBound = ZSTD_WINDOWLOG_MAX; + return bounds; + case ZSTD_d_format: + bounds.lowerBound = (int)ZSTD_f_zstd1; + bounds.upperBound = (int)ZSTD_f_zstd1_magicless; + ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless); + return bounds; + case ZSTD_d_stableOutBuffer: + bounds.lowerBound = (int)ZSTD_obm_buffered; + bounds.upperBound = (int)ZSTD_obm_stable; + return bounds; + default:; + } + bounds.error = ERROR(parameter_unsupported); + return bounds; +} + +/* ZSTD_dParam_withinBounds: + * 
@return 1 if value is within dParam bounds, + * 0 otherwise */ +static int ZSTD_dParam_withinBounds(ZSTD_dParameter dParam, int value) +{ + ZSTD_bounds const bounds = ZSTD_dParam_getBounds(dParam); + if (ZSTD_isError(bounds.error)) return 0; + if (value < bounds.lowerBound) return 0; + if (value > bounds.upperBound) return 0; + return 1; +} + +#define CHECK_DBOUNDS(p,v) { \ + RETURN_ERROR_IF(!ZSTD_dParam_withinBounds(p, v), parameter_outOfBound, ""); \ +} + +size_t ZSTD_DCtx_setParameter(ZSTD_DCtx* dctx, ZSTD_dParameter dParam, int value) +{ + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); + switch(dParam) { + case ZSTD_d_windowLogMax: + if (value == 0) value = ZSTD_WINDOWLOG_LIMIT_DEFAULT; + CHECK_DBOUNDS(ZSTD_d_windowLogMax, value); + dctx->maxWindowSize = ((size_t)1) << value; + return 0; + case ZSTD_d_format: + CHECK_DBOUNDS(ZSTD_d_format, value); + dctx->format = (ZSTD_format_e)value; + return 0; + case ZSTD_d_stableOutBuffer: + CHECK_DBOUNDS(ZSTD_d_stableOutBuffer, value); + dctx->outBufferMode = (ZSTD_outBufferMode_e)value; + return 0; + default:; + } + RETURN_ERROR(parameter_unsupported, ""); +} + +size_t ZSTD_DCtx_reset(ZSTD_DCtx* dctx, ZSTD_ResetDirective reset) +{ + if ( (reset == ZSTD_reset_session_only) + || (reset == ZSTD_reset_session_and_parameters) ) { + dctx->streamStage = zdss_init; + dctx->noForwardProgress = 0; + } + if ( (reset == ZSTD_reset_parameters) + || (reset == ZSTD_reset_session_and_parameters) ) { + RETURN_ERROR_IF(dctx->streamStage != zdss_init, stage_wrong, ""); + ZSTD_clearDict(dctx); + dctx->format = ZSTD_f_zstd1; + dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + } + return 0; +} + + +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) +{ + return ZSTD_sizeof_DCtx(dctx); +} + +size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) +{ + size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); + unsigned long long const neededRBSize = windowSize + 
blockSize + (WILDCOPY_OVERLENGTH * 2); + unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); + size_t const minRBSize = (size_t) neededSize; + RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, + frameParameter_windowTooLarge, ""); + return minRBSize; +} + +size_t ZSTD_estimateDStreamSize(size_t windowSize) +{ + size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX); + size_t const inBuffSize = blockSize; /* no block can be larger */ + size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN); + return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize; +} + +size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize) +{ + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable, but requires an additional parameter (or a dctx) */ + ZSTD_frameHeader zfh; + size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize); + if (ZSTD_isError(err)) return err; + RETURN_ERROR_IF(err>0, srcSize_wrong, ""); + RETURN_ERROR_IF(zfh.windowSize > windowSizeMax, + frameParameter_windowTooLarge, ""); + return ZSTD_estimateDStreamSize((size_t)zfh.windowSize); +} + + +/* ***** Decompression ***** */ + +static int ZSTD_DCtx_isOverflow(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) +{ + return (zds->inBuffSize + zds->outBuffSize) >= (neededInBuffSize + neededOutBuffSize) * ZSTD_WORKSPACETOOLARGE_FACTOR; +} + +static void ZSTD_DCtx_updateOversizedDuration(ZSTD_DStream* zds, size_t const neededInBuffSize, size_t const neededOutBuffSize) +{ + if (ZSTD_DCtx_isOverflow(zds, neededInBuffSize, neededOutBuffSize)) + zds->oversizedDuration++; + else + zds->oversizedDuration = 0; +} + +static int ZSTD_DCtx_isOversizedTooLong(ZSTD_DStream* zds) +{ + return zds->oversizedDuration >= ZSTD_WORKSPACETOOLARGE_MAXDURATION; +} + +/* Checks that the output buffer hasn't changed if ZSTD_obm_stable is used. 
*/ +static size_t ZSTD_checkOutBuffer(ZSTD_DStream const* zds, ZSTD_outBuffer const* output) +{ + ZSTD_outBuffer const expect = zds->expectedOutBuffer; + /* No requirement when ZSTD_obm_stable is not enabled. */ + if (zds->outBufferMode != ZSTD_obm_stable) + return 0; + /* Any buffer is allowed in zdss_init, this must be the same for every other call until + * the context is reset. + */ + if (zds->streamStage == zdss_init) + return 0; + /* The buffer must match our expectation exactly. */ + if (expect.dst == output->dst && expect.pos == output->pos && expect.size == output->size) + return 0; + RETURN_ERROR(dstBuffer_wrong, "ZSTD_obm_stable enabled but output differs!"); +} + +/* Calls ZSTD_decompressContinue() with the right parameters for ZSTD_decompressStream() + * and updates the stage and the output buffer state. This call is extracted so it can be + * used both when reading directly from the ZSTD_inBuffer, and in buffered input mode. + * NOTE: You must break after calling this function since the streamStage is modified. + */ +static size_t ZSTD_decompressContinueStream( + ZSTD_DStream* zds, char** op, char* oend, + void const* src, size_t srcSize) { + int const isSkipFrame = ZSTD_isSkipFrame(zds); + if (zds->outBufferMode == ZSTD_obm_buffered) { + size_t const dstSize = isSkipFrame ? 0 : zds->outBuffSize - zds->outStart; + size_t const decodedSize = ZSTD_decompressContinue(zds, + zds->outBuff + zds->outStart, dstSize, src, srcSize); + FORWARD_IF_ERROR(decodedSize, ""); + if (!decodedSize && !isSkipFrame) { + zds->streamStage = zdss_read; + } else { + zds->outEnd = zds->outStart + decodedSize; + zds->streamStage = zdss_flush; + } + } else { + /* Write directly into the output buffer */ + size_t const dstSize = isSkipFrame ? 0 : oend - *op; + size_t const decodedSize = ZSTD_decompressContinue(zds, *op, dstSize, src, srcSize); + FORWARD_IF_ERROR(decodedSize, ""); + *op += decodedSize; + /* Flushing is not needed. 
*/ + zds->streamStage = zdss_read; + assert(*op <= oend); + assert(zds->outBufferMode == ZSTD_obm_stable); + } + return 0; +} + +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + const char* const src = (const char*)input->src; + const char* const istart = input->pos != 0 ? src + input->pos : src; + const char* const iend = input->size != 0 ? src + input->size : src; + const char* ip = istart; + char* const dst = (char*)output->dst; + char* const ostart = output->pos != 0 ? dst + output->pos : dst; + char* const oend = output->size != 0 ? dst + output->size : dst; + char* op = ostart; + U32 someMoreWork = 1; + + DEBUGLOG(5, "ZSTD_decompressStream"); + RETURN_ERROR_IF( + input->pos > input->size, + srcSize_wrong, + "forbidden. in: pos: %u vs size: %u", + (U32)input->pos, (U32)input->size); + RETURN_ERROR_IF( + output->pos > output->size, + dstSize_tooSmall, + "forbidden. out: pos: %u vs size: %u", + (U32)output->pos, (U32)output->size); + DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos)); + FORWARD_IF_ERROR(ZSTD_checkOutBuffer(zds, output), ""); + + while (someMoreWork) { + switch(zds->streamStage) + { + case zdss_init : + DEBUGLOG(5, "stage zdss_init => transparent reset "); + zds->streamStage = zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + zds->legacyVersion = 0; + zds->hostageByte = 0; + zds->expectedOutBuffer = *output; + /* fall-through */ + + case zdss_loadHeader : + DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) + if (zds->legacyVersion) { + RETURN_ERROR_IF(zds->staticSize, memory_allocation, + "legacy support is incompatible with static dctx"); + { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input); + if (hint==0) zds->streamStage = zdss_init; + return hint; + } } +#endif + { size_t const hSize = 
ZSTD_getFrameHeader_advanced(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format); + DEBUGLOG(5, "header size : %u", (U32)hSize); + if (ZSTD_isError(hSize)) { +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) + U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart); + if (legacyVersion) { + ZSTD_DDict const* const ddict = ZSTD_getDDict(zds); + const void* const dict = ddict ? ZSTD_DDict_dictContent(ddict) : NULL; + size_t const dictSize = ddict ? ZSTD_DDict_dictSize(ddict) : 0; + DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion); + RETURN_ERROR_IF(zds->staticSize, memory_allocation, + "legacy support is incompatible with static dctx"); + FORWARD_IF_ERROR(ZSTD_initLegacyStream(&zds->legacyContext, + zds->previousLegacyVersion, legacyVersion, + dict, dictSize), ""); + zds->legacyVersion = zds->previousLegacyVersion = legacyVersion; + { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input); + if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */ + return hint; + } } +#endif + return hSize; /* error */ + } + if (hSize != 0) { /* need more input */ + size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ + size_t const remainingInput = (size_t)(iend-ip); + assert(iend >= ip); + if (toLoad > remainingInput) { /* not enough input to load full header */ + if (remainingInput > 0) { + memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput); + zds->lhSize += remainingInput; + } + input->pos = input->size; + return (MAX((size_t)ZSTD_FRAMEHEADERSIZE_MIN(zds->format), hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ + } + assert(ip != NULL); + memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; + break; + } } + + /* check for single-pass mode opportunity */ + if (zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN + && 
zds->fParams.frameType != ZSTD_skippableFrame + && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { + size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); + if (cSize <= (size_t)(iend-istart)) { + /* shortcut : using single-pass mode */ + size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, ZSTD_getDDict(zds)); + if (ZSTD_isError(decompressedSize)) return decompressedSize; + DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()") + ip = istart + cSize; + op += decompressedSize; + zds->expected = 0; + zds->streamStage = zdss_init; + someMoreWork = 0; + break; + } } + + /* Check output buffer is large enough for ZSTD_odm_stable. */ + if (zds->outBufferMode == ZSTD_obm_stable + && zds->fParams.frameType != ZSTD_skippableFrame + && zds->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN + && (U64)(size_t)(oend-op) < zds->fParams.frameContentSize) { + RETURN_ERROR(dstSize_tooSmall, "ZSTD_obm_stable passed but ZSTD_outBuffer is too small"); + } + + /* Consume header (see ZSTDds_decodeFrameHeader) */ + DEBUGLOG(4, "Consume header"); + FORWARD_IF_ERROR(ZSTD_decompressBegin_usingDDict(zds, ZSTD_getDDict(zds)), ""); + + if ((MEM_readLE32(zds->headerBuffer) & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_FRAMEIDSIZE); + zds->stage = ZSTDds_skipFrame; + } else { + FORWARD_IF_ERROR(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize), ""); + zds->expected = ZSTD_blockHeaderSize; + zds->stage = ZSTDds_decodeBlockHeader; + } + + /* control buffer memory usage */ + DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)", + (U32)(zds->fParams.windowSize >>10), + (U32)(zds->maxWindowSize >> 10) ); + zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); + RETURN_ERROR_IF(zds->fParams.windowSize > zds->maxWindowSize, + frameParameter_windowTooLarge, ""); + + /* Adapt 
buffer sizes to frame header instructions */ + { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */); + size_t const neededOutBuffSize = zds->outBufferMode == ZSTD_obm_buffered + ? ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize) + : 0; + + ZSTD_DCtx_updateOversizedDuration(zds, neededInBuffSize, neededOutBuffSize); + + { int const tooSmall = (zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize); + int const tooLarge = ZSTD_DCtx_isOversizedTooLong(zds); + + if (tooSmall || tooLarge) { + size_t const bufferSize = neededInBuffSize + neededOutBuffSize; + DEBUGLOG(4, "inBuff : from %u to %u", + (U32)zds->inBuffSize, (U32)neededInBuffSize); + DEBUGLOG(4, "outBuff : from %u to %u", + (U32)zds->outBuffSize, (U32)neededOutBuffSize); + if (zds->staticSize) { /* static DCtx */ + DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize); + assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */ + RETURN_ERROR_IF( + bufferSize > zds->staticSize - sizeof(ZSTD_DCtx), + memory_allocation, ""); + } else { + ZSTD_free(zds->inBuff, zds->customMem); + zds->inBuffSize = 0; + zds->outBuffSize = 0; + zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem); + RETURN_ERROR_IF(zds->inBuff == NULL, memory_allocation, ""); + } + zds->inBuffSize = neededInBuffSize; + zds->outBuff = zds->inBuff + zds->inBuffSize; + zds->outBuffSize = neededOutBuffSize; + } } } + zds->streamStage = zdss_read; + /* fall-through */ + + case zdss_read: + DEBUGLOG(5, "stage zdss_read"); + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip); + DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize); + if (neededInSize==0) { /* end of frame */ + zds->streamStage = zdss_init; + someMoreWork = 0; + break; + } + if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ + FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, ip, neededInSize), ""); + ip += 
neededInSize; + /* Function modifies the stage so we must break */ + break; + } } + if (ip==iend) { someMoreWork = 0; break; } /* no more input */ + zds->streamStage = zdss_load; + /* fall-through */ + + case zdss_load: + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); + size_t const toLoad = neededInSize - zds->inPos; + int const isSkipFrame = ZSTD_isSkipFrame(zds); + size_t loadedSize; + /* At this point we shouldn't be decompressing a block that we can stream. */ + assert(neededInSize == ZSTD_nextSrcSizeToDecompressWithInputSize(zds, iend - ip)); + if (isSkipFrame) { + loadedSize = MIN(toLoad, (size_t)(iend-ip)); + } else { + RETURN_ERROR_IF(toLoad > zds->inBuffSize - zds->inPos, + corruption_detected, + "should never happen"); + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); + } + ip += loadedSize; + zds->inPos += loadedSize; + if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ + + /* decode loaded input */ + zds->inPos = 0; /* input is consumed */ + FORWARD_IF_ERROR(ZSTD_decompressContinueStream(zds, &op, oend, zds->inBuff, neededInSize), ""); + /* Function modifies the stage so we must break */ + break; + } + case zdss_flush: + { size_t const toFlushSize = zds->outEnd - zds->outStart; + size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); + op += flushedSize; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) { /* flush completed */ + zds->streamStage = zdss_read; + if ( (zds->outBuffSize < zds->fParams.frameContentSize) + && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) { + DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)", + (int)(zds->outBuffSize - zds->outStart), + (U32)zds->fParams.blockSizeMax); + zds->outStart = zds->outEnd = 0; + } + break; + } } + /* cannot complete flush */ + someMoreWork = 0; + break; + + default: + assert(0); /* impossible */ + 
RETURN_ERROR(GENERIC, "impossible to reach"); /* some compiler require default to do something */ + } } + + /* result */ + input->pos = (size_t)(ip - (const char*)(input->src)); + output->pos = (size_t)(op - (char*)(output->dst)); + + /* Update the expected output buffer for ZSTD_obm_stable. */ + zds->expectedOutBuffer = *output; + + if ((ip==istart) && (op==ostart)) { /* no forward progress */ + zds->noForwardProgress ++; + if (zds->noForwardProgress >= ZSTD_NO_FORWARD_PROGRESS_MAX) { + RETURN_ERROR_IF(op==oend, dstSize_tooSmall, ""); + RETURN_ERROR_IF(ip==iend, srcSize_wrong, ""); + assert(0); + } + } else { + zds->noForwardProgress = 0; + } + { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds); + if (!nextSrcSizeHint) { /* frame fully decoded */ + if (zds->outEnd == zds->outStart) { /* output fully flushed */ + if (zds->hostageByte) { + if (input->pos >= input->size) { + /* can't release hostage (not present) */ + zds->streamStage = zdss_read; + return 1; + } + input->pos++; /* release hostage */ + } /* zds->hostageByte */ + return 0; + } /* zds->outEnd == zds->outStart */ + if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ + input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ + zds->hostageByte=1; + } + return 1; + } /* nextSrcSizeHint==0 */ + nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */ + assert(zds->inPos <= nextSrcSizeHint); + nextSrcSizeHint -= zds->inPos; /* part already loaded*/ + return nextSrcSizeHint; + } +} + +size_t ZSTD_decompressStream_simpleArgs ( + ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, size_t* dstPos, + const void* src, size_t srcSize, size_t* srcPos) +{ + ZSTD_outBuffer output = { dst, dstCapacity, *dstPos }; + ZSTD_inBuffer input = { src, srcSize, *srcPos }; + /* ZSTD_compress_generic() will check validity of dstPos and srcPos */ + size_t 
const cErr = ZSTD_decompressStream(dctx, &output, &input); + *dstPos = output.pos; + *srcPos = input.pos; + return cErr; +} diff --git a/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.c b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.c new file mode 100644 index 0000000000..ad3b3d8dbd --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.c @@ -0,0 +1,1432 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* zstd_decompress_block : + * this module takes care of decompressing _compressed_ block */ + +/*-******************************************************* +* Dependencies +*********************************************************/ +#include /* memcpy, memmove, memset */ +#include "../common/compiler.h" /* prefetch */ +#include "../common/cpu.h" /* bmi2 */ +#include "../common/mem.h" /* low level memory routines */ +#define FSE_STATIC_LINKING_ONLY +#include "../common/fse.h" +#define HUF_STATIC_LINKING_ONLY +#include "../common/huf.h" +#include "../common/zstd_internal.h" +#include "zstd_decompress_internal.h" /* ZSTD_DCtx */ +#include "zstd_ddict.h" /* ZSTD_DDictDictContent */ +#include "zstd_decompress_block.h" + +/*_******************************************************* +* Macros +**********************************************************/ + +/* These two optional macros force the use one way or another of the two + * ZSTD_decompressSequences implementations. You can't force in both directions + * at the same time. 
+ */ +#if defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ + defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) +#error "Cannot force the use of the short and the long ZSTD_decompressSequences variants!" +#endif + + +/*_******************************************************* +* Memory operations +**********************************************************/ +static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } + + +/*-************************************************************* + * Block decoding + ***************************************************************/ + +/*! ZSTD_getcBlockSize() : + * Provides the size of compressed block from block header `src` */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, + blockProperties_t* bpPtr) +{ + RETURN_ERROR_IF(srcSize < ZSTD_blockHeaderSize, srcSize_wrong, ""); + + { U32 const cBlockHeader = MEM_readLE24(src); + U32 const cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); + bpPtr->origSize = cSize; /* only useful for RLE */ + if (bpPtr->blockType == bt_rle) return 1; + RETURN_ERROR_IF(bpPtr->blockType == bt_reserved, corruption_detected, ""); + return cSize; + } +} + + +/* Hidden declaration for fullbench */ +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + const void* src, size_t srcSize); +/*! 
ZSTD_decodeLiteralsBlock() : + * @return : nb of bytes read from src (< srcSize ) + * note : symbol not declared but exposed for fullbench */ +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ +{ + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); + RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); + + { const BYTE* const istart = (const BYTE*) src; + symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + + switch(litEncType) + { + case set_repeat: + DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); + RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); + /* fall-through */ + + case set_compressed: + RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); + { size_t lhSize, litSize, litCSize; + U32 singleStream=0; + U32 const lhlCode = (istart[0] >> 2) & 3; + U32 const lhc = MEM_readLE32(istart); + size_t hufSuccess; + switch(lhlCode) + { + case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ + /* 2 - 2 - 10 - 10 */ + singleStream = !lhlCode; + lhSize = 3; + litSize = (lhc >> 4) & 0x3FF; + litCSize = (lhc >> 14) & 0x3FF; + break; + case 2: + /* 2 - 2 - 14 - 14 */ + lhSize = 4; + litSize = (lhc >> 4) & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + /* 2 - 2 - 18 - 18 */ + lhSize = 5; + litSize = (lhc >> 4) & 0x3FFFF; + litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); + break; + } + RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); + RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); + + /* prefetch huffman table if cold */ + if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { + PREFETCH_AREA(dctx->HUFptr, sizeof(dctx->entropy.hufTable)); + } + + if (litEncType==set_repeat) { + if (singleStream) { + hufSuccess = HUF_decompress1X_usingDTable_bmi2( + 
dctx->litBuffer, litSize, istart+lhSize, litCSize, + dctx->HUFptr, dctx->bmi2); + } else { + hufSuccess = HUF_decompress4X_usingDTable_bmi2( + dctx->litBuffer, litSize, istart+lhSize, litCSize, + dctx->HUFptr, dctx->bmi2); + } + } else { + if (singleStream) { +#if defined(HUF_FORCE_DECOMPRESS_X2) + hufSuccess = HUF_decompress1X_DCtx_wksp( + dctx->entropy.hufTable, dctx->litBuffer, litSize, + istart+lhSize, litCSize, dctx->workspace, + sizeof(dctx->workspace)); +#else + hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( + dctx->entropy.hufTable, dctx->litBuffer, litSize, + istart+lhSize, litCSize, dctx->workspace, + sizeof(dctx->workspace), dctx->bmi2); +#endif + } else { + hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( + dctx->entropy.hufTable, dctx->litBuffer, litSize, + istart+lhSize, litCSize, dctx->workspace, + sizeof(dctx->workspace), dctx->bmi2); + } + } + + RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return litCSize + lhSize; + } + + case set_basic: + { size_t litSize, lhSize; + U32 const lhlCode = ((istart[0]) >> 2) & 3; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ + RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); + memcpy(dctx->litBuffer, istart+lhSize, litSize); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return lhSize+litSize; + } + /* 
direct reference into compressed stream */ + dctx->litPtr = istart+lhSize; + dctx->litSize = litSize; + return lhSize+litSize; + } + + case set_rle: + { U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t litSize, lhSize; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); + break; + } + RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); + memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize+1; + } + default: + RETURN_ERROR(corruption_detected, "impossible"); + } + } +} + +/* Default FSE distribution tables. + * These are pre-calculated FSE decoding tables using default distributions as defined in specification : + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions + * They were generated programmatically with following method : + * - start from default distributions, present in /lib/common/zstd_internal.h + * - generate tables normally, using ZSTD_buildFSETable() + * - printout the content of tables + * - pretify output, report below, test with fuzzer to ensure it's correct */ + +/* Default FSE distribution table for Literal Lengths */ +static const ZSTD_seqSymbol LL_defaultDTable[(1<tableLog = 0; + DTableH->fastMode = 0; + + cell->nbBits = 0; + cell->nextState = 0; + assert(nbAddBits < 255); + cell->nbAdditionalBits = (BYTE)nbAddBits; + cell->baseValue = baseValue; +} + + +/* ZSTD_buildFSETable() : + * generate FSE decoding table for one symbol (ll, ml or off) + * cannot fail if input is valid => + * all inputs are presumed validated at this stage 
*/ +void +ZSTD_buildFSETable(ZSTD_seqSymbol* dt, + const short* normalizedCounter, unsigned maxSymbolValue, + const U32* baseValue, const U32* nbAdditionalBits, + unsigned tableLog) +{ + ZSTD_seqSymbol* const tableDecode = dt+1; + U16 symbolNext[MaxSeq+1]; + + U32 const maxSV1 = maxSymbolValue + 1; + U32 const tableSize = 1 << tableLog; + U32 highThreshold = tableSize-1; + + /* Sanity Checks */ + assert(maxSymbolValue <= MaxSeq); + assert(tableLog <= MaxFSELog); + + /* Init, lay down lowprob symbols */ + { ZSTD_seqSymbol_header DTableH; + DTableH.tableLog = tableLog; + DTableH.fastMode = 1; + { S16 const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s= largeLimit) DTableH.fastMode=0; + assert(normalizedCounter[s]>=0); + symbolNext[s] = (U16)normalizedCounter[s]; + } } } + memcpy(dt, &DTableH, sizeof(DTableH)); + } + + /* Spread symbols */ + { U32 const tableMask = tableSize-1; + U32 const step = FSE_TABLESTEP(tableSize); + U32 s, position = 0; + for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } } + assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + } + + /* Build Decoding table */ + { U32 u; + for (u=0; u max, corruption_detected, ""); + { U32 const symbol = *(const BYTE*)src; + U32 const baseline = baseValue[symbol]; + U32 const nbBits = nbAdditionalBits[symbol]; + ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); + } + *DTablePtr = DTableSpace; + return 1; + case set_basic : + *DTablePtr = defaultTable; + return 0; + case set_repeat: + RETURN_ERROR_IF(!flagRepeatTable, corruption_detected, ""); + /* prefetch FSE table if used */ + if (ddictIsCold && (nbSeq > 24 /* heuristic */)) { + const void* const pStart = *DTablePtr; + size_t const pSize = sizeof(ZSTD_seqSymbol) * (SEQSYMBOL_TABLE_SIZE(maxLog)); + PREFETCH_AREA(pStart, pSize); + } + return 0; + case set_compressed : + { unsigned tableLog; + S16 norm[MaxSeq+1]; + size_t const headerSize = 
FSE_readNCount(norm, &max, &tableLog, src, srcSize); + RETURN_ERROR_IF(FSE_isError(headerSize), corruption_detected, ""); + RETURN_ERROR_IF(tableLog > maxLog, corruption_detected, ""); + ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog); + *DTablePtr = DTableSpace; + return headerSize; + } + default : + assert(0); + RETURN_ERROR(GENERIC, "impossible"); + } +} + +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + int nbSeq; + DEBUGLOG(5, "ZSTD_decodeSeqHeaders"); + + /* check */ + RETURN_ERROR_IF(srcSize < MIN_SEQUENCES_SIZE, srcSize_wrong, ""); + + /* SeqHead */ + nbSeq = *ip++; + if (!nbSeq) { + *nbSeqPtr=0; + RETURN_ERROR_IF(srcSize != 1, srcSize_wrong, ""); + return 1; + } + if (nbSeq > 0x7F) { + if (nbSeq == 0xFF) { + RETURN_ERROR_IF(ip+2 > iend, srcSize_wrong, ""); + nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; + } else { + RETURN_ERROR_IF(ip >= iend, srcSize_wrong, ""); + nbSeq = ((nbSeq-0x80)<<8) + *ip++; + } + } + *nbSeqPtr = nbSeq; + + /* FSE table descriptors */ + RETURN_ERROR_IF(ip+1 > iend, srcSize_wrong, ""); /* minimum possible size: 1 byte for symbol encoding types */ + { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); + symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); + symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + ip++; + + /* Build DTables */ + { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, + LLtype, MaxLL, LLFSELog, + ip, iend-ip, + LL_base, LL_bits, + LL_defaultDTable, dctx->fseEntropy, + dctx->ddictIsCold, nbSeq); + RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); + ip += llhSize; + } + + { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, + OFtype, MaxOff, 
OffFSELog, + ip, iend-ip, + OF_base, OF_bits, + OF_defaultDTable, dctx->fseEntropy, + dctx->ddictIsCold, nbSeq); + RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); + ip += ofhSize; + } + + { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, + MLtype, MaxML, MLFSELog, + ip, iend-ip, + ML_base, ML_bits, + ML_defaultDTable, dctx->fseEntropy, + dctx->ddictIsCold, nbSeq); + RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); + ip += mlhSize; + } + } + + return ip-istart; +} + + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; + const BYTE* match; +} seq_t; + +typedef struct { + size_t state; + const ZSTD_seqSymbol* table; +} ZSTD_fseState; + +typedef struct { + BIT_DStream_t DStream; + ZSTD_fseState stateLL; + ZSTD_fseState stateOffb; + ZSTD_fseState stateML; + size_t prevOffset[ZSTD_REP_NUM]; + const BYTE* prefixStart; + const BYTE* dictEnd; + size_t pos; +} seqState_t; + +/*! ZSTD_overlapCopy8() : + * Copies 8 bytes from ip to op and updates op and ip where ip <= op. + * If the offset is < 8 then the offset is spread to at least 8 bytes. + * + * Precondition: *ip <= *op + * Postcondition: *op - *op >= 8 + */ +HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { + assert(*ip <= *op); + if (offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[offset]; + (*op)[0] = (*ip)[0]; + (*op)[1] = (*ip)[1]; + (*op)[2] = (*ip)[2]; + (*op)[3] = (*ip)[3]; + *ip += dec32table[offset]; + ZSTD_copy4(*op+4, *ip); + *ip -= sub2; + } else { + ZSTD_copy8(*op, *ip); + } + *ip += 8; + *op += 8; + assert(*op - *ip >= 8); +} + +/*! 
ZSTD_safecopy() : + * Specialized version of memcpy() that is allowed to READ up to WILDCOPY_OVERLENGTH past the input buffer + * and write up to 16 bytes past oend_w (op >= oend_w is allowed). + * This function is only called in the uncommon case where the sequence is near the end of the block. It + * should be fast for a single long sequence, but can be slow for several short sequences. + * + * @param ovtype controls the overlap detection + * - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart. + * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. + * The src buffer must be before the dst buffer. + */ +static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { + ptrdiff_t const diff = op - ip; + BYTE* const oend = op + length; + + assert((ovtype == ZSTD_no_overlap && (diff <= -8 || diff >= 8 || op >= oend_w)) || + (ovtype == ZSTD_overlap_src_before_dst && diff >= 0)); + + if (length < 8) { + /* Handle short lengths. */ + while (op < oend) *op++ = *ip++; + return; + } + if (ovtype == ZSTD_overlap_src_before_dst) { + /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ + assert(length >= 8); + ZSTD_overlapCopy8(&op, &ip, diff); + assert(op - ip >= 8); + assert(op <= oend); + } + + if (oend <= oend_w) { + /* No risk of overwrite. */ + ZSTD_wildcopy(op, ip, length, ovtype); + return; + } + if (op <= oend_w) { + /* Wildcopy until we get close to the end. */ + assert(oend > oend_w); + ZSTD_wildcopy(op, ip, oend_w - op, ovtype); + ip += oend_w - op; + op = oend_w; + } + /* Handle the leftovers. */ + while (op < oend) *op++ = *ip++; +} + +/* ZSTD_execSequenceEnd(): + * This version handles cases that are near the end of the output buffer. It requires + * more careful checks to make sure there is no overflow. By separating out these hard + * and unlikely cases, we can speed up the common cases. 
+ * + * NOTE: This function needs to be fast for a single long sequence, but doesn't need + * to be optimized for many small sequences, since those fall into ZSTD_execSequence(). + */ +FORCE_NOINLINE +size_t ZSTD_execSequenceEnd(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + + /* bounds checks : careful of address space overflow in 32-bit mode */ + RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); + RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + + /* copy literals */ + ZSTD_safecopy(op, oend_w, *litPtr, sequence.litLength, ZSTD_no_overlap); + op = oLitEnd; + *litPtr = iLitEnd; + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix */ + RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); + match = dictEnd - (prefixStart-match); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } } + ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); + return sequenceLength; +} + +HINT_INLINE +size_t ZSTD_execSequence(BYTE* op, + BYTE* 
const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; /* risk : address space underflow on oend=NULL */ + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + assert(op != NULL /* Precondition */); + assert(oend_w < oend /* No underflow */); + /* Handle edge cases in a slow path: + * - Read beyond end of literals + * - Match end is within WILDCOPY_OVERLIMIT of oend + * - 32-bit mode and the match length overflows + */ + if (UNLIKELY( + iLitEnd > litLimit || + oMatchEnd > oend_w || + (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) + return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + + /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ + assert(op <= oLitEnd /* No overflow */); + assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); + assert(oMatchEnd <= oend /* No underflow */); + assert(iLitEnd <= litLimit /* Literal length is in bounds */); + assert(oLitEnd <= oend_w /* Can wildcopy literals */); + assert(oMatchEnd <= oend_w /* Can wildcopy matches */); + + /* Copy Literals: + * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. + * We likely don't need the full 32-byte wildcopy. 
+ */ + assert(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(op, (*litPtr)); + if (UNLIKELY(sequence.litLength > 16)) { + ZSTD_wildcopy(op+16, (*litPtr)+16, sequence.litLength-16, ZSTD_no_overlap); + } + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* Copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix -> go into extDict */ + RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); + match = dictEnd + (match - prefixStart); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } } + /* Match within prefix of 1 or more bytes */ + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + + /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy + * without overlap checking. + */ + if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { + /* We bet on a full wildcopy for matches, since we expect matches to be + * longer than literals (in general). In silesia, ~10% of matches are longer + * than 16 bytes. + */ + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); + return sequenceLength; + } + assert(sequence.offset < WILDCOPY_VECLEN); + + /* Copy 8 bytes and spread the offset to be >= 8. */ + ZSTD_overlapCopy8(&op, &match, sequence.offset); + + /* If the match length is > 8 bytes, then continue with the wildcopy. 
*/ + if (sequence.matchLength > 8) { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8, ZSTD_overlap_src_before_dst); + } + return sequenceLength; +} + +static void +ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) +{ + const void* ptr = dt; + const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits", + (U32)DStatePtr->state, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +FORCE_INLINE_TEMPLATE void +ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) +{ + ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.nextState + lowBits; +} + +FORCE_INLINE_TEMPLATE void +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo) +{ + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.nextState + lowBits; +} + +/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum + * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1) + * bits before reloading. This value is the maximum number of bytes we read + * after reloading when we are decoding long offsets. + */ +#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \ + (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \ + ? 
ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \ + : 0) + +typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e; +typedef enum { ZSTD_p_noPrefetch=0, ZSTD_p_prefetch=1 } ZSTD_prefetch_e; + +FORCE_INLINE_TEMPLATE seq_t +ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets, const ZSTD_prefetch_e prefetch) +{ + seq_t seq; + ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state]; + ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state]; + ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state]; + U32 const llBase = llDInfo.baseValue; + U32 const mlBase = mlDInfo.baseValue; + U32 const ofBase = ofDInfo.baseValue; + BYTE const llBits = llDInfo.nbAdditionalBits; + BYTE const mlBits = mlDInfo.nbAdditionalBits; + BYTE const ofBits = ofDInfo.nbAdditionalBits; + BYTE const totalBits = llBits+mlBits+ofBits; + + /* sequence */ + { size_t offset; + if (ofBits > 1) { + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { + U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + } else { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } else { + U32 const ll0 = (llBase == 0); + if (LIKELY((ofBits == 0))) { + if 
(LIKELY(!ll0)) + offset = seqState->prevOffset[0]; + else { + offset = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + } else { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } } } + seq.offset = offset; + } + + seq.matchLength = mlBase; + if (mlBits > 0) + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); + + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + seq.litLength = llBase; + if (llBits > 0) + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); + + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + if (prefetch == ZSTD_p_prefetch) { + size_t const pos = seqState->pos + seq.litLength; + const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart; + seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. 
+ * No consequence though : no memory access will occur, offset is only used for prefetching */ + seqState->pos = pos + seq.matchLength; + } + + /* ANS state update + * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo(). + * clang-9.2.0 does 7% worse with ZSTD_updateFseState(). + * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the + * better option, so it is the default for other compilers. But, if you + * measure that it is worse, please put up a pull request. + */ + { +#if defined(__GNUC__) && !defined(__clang__) + const int kUseUpdateFseState = 1; +#else + const int kUseUpdateFseState = 0; +#endif + if (kUseUpdateFseState) { + ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + } else { + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */ + } + } + + return seq; +} + +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION +static int ZSTD_dictionaryIsActive(ZSTD_DCtx const* dctx, BYTE const* prefixStart, BYTE const* oLitEnd) +{ + size_t const windowSize = dctx->fParams.windowSize; + /* No dictionary used. */ + if (dctx->dictContentEndForFuzzing == NULL) return 0; + /* Dictionary is our prefix. */ + if (prefixStart == dctx->dictContentBeginForFuzzing) return 1; + /* Dictionary is not our ext-dict. */ + if (dctx->dictEnd != dctx->dictContentEndForFuzzing) return 0; + /* Dictionary is not within our window size. 
*/ + if ((size_t)(oLitEnd - prefixStart) >= windowSize) return 0; + /* Dictionary is active. */ + return 1; +} + +MEM_STATIC void ZSTD_assertValidSequence( + ZSTD_DCtx const* dctx, + BYTE const* op, BYTE const* oend, + seq_t const seq, + BYTE const* prefixStart, BYTE const* virtualStart) +{ + size_t const windowSize = dctx->fParams.windowSize; + size_t const sequenceSize = seq.litLength + seq.matchLength; + BYTE const* const oLitEnd = op + seq.litLength; + DEBUGLOG(6, "Checking sequence: litL=%u matchL=%u offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + assert(op <= oend); + assert((size_t)(oend - op) >= sequenceSize); + assert(sequenceSize <= ZSTD_BLOCKSIZE_MAX); + if (ZSTD_dictionaryIsActive(dctx, prefixStart, oLitEnd)) { + size_t const dictSize = (size_t)((char const*)dctx->dictContentEndForFuzzing - (char const*)dctx->dictContentBeginForFuzzing); + /* Offset must be within the dictionary. */ + assert(seq.offset <= (size_t)(oLitEnd - virtualStart)); + assert(seq.offset <= windowSize + dictSize); + } else { + /* Offset must be within our window. 
*/ + assert(seq.offset <= windowSize); + } +} +#endif + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG +FORCE_INLINE_TEMPLATE size_t +DONT_VECTORIZE +ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); + const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + DEBUGLOG(5, "ZSTD_decompressSequences_body"); + (void)frame; + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + size_t error = 0; + dctx->fseEntropy = 1; + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + RETURN_ERROR_IF( + ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), + corruption_detected, ""); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != NULL); + + ZSTD_STATIC_ASSERT( + BIT_DStream_unfinished < BIT_DStream_completed && + BIT_DStream_endOfBuffer < BIT_DStream_completed && + BIT_DStream_completed < BIT_DStream_overflow); + +#if defined(__GNUC__) && defined(__x86_64__) + /* Align the decompression loop to 32 + 16 bytes. + * + * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression + * speed swings based on the alignment of the decompression loop. This + * performance swing is caused by parts of the decompression loop falling + * out of the DSB.
The entire decompression loop should fit in the DSB, + * when it can't we get much worse performance. You can measure if you've + * hit the good case or the bad case with this perf command for some + * compressed file test.zst: + * + * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ + * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst + * + * If you see most cycles served out of the MITE you've hit the bad case. + * If you see most cycles served out of the DSB you've hit the good case. + * If it is pretty even then you may be in an okay case. + * + * I've been able to reproduce this issue on the following CPUs: + * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 + * Use Instruments->Counters to get DSB/MITE cycles. + * I never got performance swings, but I was able to + * go from the good case of mostly DSB to half of the + * cycles served from MITE. + * - Coffeelake: Intel i9-9900k + * + * I haven't been able to reproduce the instability or DSB misses on any + * of the following CPUS: + * - Haswell + * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH + * - Skylake + * + * If you are seeing performance stability this script can help test. + * It tests on 4 commits in zstd where I saw performance change. 
+ * + * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 + */ + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 4"); +#endif + for ( ; ; ) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_noPrefetch); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + BIT_reloadDStream(&(seqState.DStream)); + /* gcc and clang both don't like early returns in this loop. + * gcc doesn't like early breaks either. + * Instead save an error and report it at the end. + * When there is an error, don't increment op, so we don't + * overwrite. + */ + if (UNLIKELY(ZSTD_isError(oneSeqSize))) error = oneSeqSize; + else op += oneSeqSize; + if (UNLIKELY(!--nbSeq)) break; + } + + /* check if reached exact end */ + DEBUGLOG(5, "ZSTD_decompressSequences_body: after decode loop, remaining nbSeq : %i", nbSeq); + if (ZSTD_isError(error)) return error; + RETURN_ERROR_IF(nbSeq, corruption_detected, ""); + RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + + return op-ostart; +} + +static size_t +ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return
ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT +FORCE_INLINE_TEMPLATE size_t +ZSTD_decompressSequencesLong_body( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); + const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + (void)frame; + + /* Regen sequences */ + if (nbSeq) { +#define STORED_SEQS 4 +#define STORED_SEQS_MASK (STORED_SEQS-1) +#define ADVANCED_SEQS 4 + seq_t sequences[STORED_SEQS]; + int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); + seqState_t seqState; + int seqNb; + dctx->fseEntropy = 1; + { int i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + seqState.prefixStart = prefixStart; + seqState.pos = (size_t)(op-prefixStart); + seqState.dictEnd = dictEnd; + assert(dst != NULL); + assert(iend >= ip); + RETURN_ERROR_IF( + ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend-ip)), + corruption_detected, ""); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + /* prepare in advance */ + for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) { + sequences[seqNb] = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch); + PREFETCH_L1(sequences[seqNb].match); PREFETCH_L1(sequences[seqNb].match + sequences[seqNb].matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ + } + RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, ""); + + /* decode and decompress */ + for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset, ZSTD_p_prefetch); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + PREFETCH_L1(sequence.match); PREFETCH_L1(sequence.match + sequence.matchLength - 1); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } + RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, ""); + + /* finish queue */ + seqNb -= seqAdvance; + for ( ; seqNb<nbSeq ; seqNb++) { + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; +
RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + + return op-ostart; +} + +static size_t +ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ + + + +#if DYNAMIC_BMI2 + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG +static TARGET_ATTRIBUTE("bmi2") size_t +DONT_VECTORIZE +ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT +static TARGET_ATTRIBUTE("bmi2") size_t +ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ + +#endif /* DYNAMIC_BMI2 */ + +typedef size_t (*ZSTD_decompressSequences_t)( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame); + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG +static size_t +ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + 
DEBUGLOG(5, "ZSTD_decompressSequences"); +#if DYNAMIC_BMI2 + if (dctx->bmi2) { + return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + } +#endif + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ + + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT +/* ZSTD_decompressSequencesLong() : + * decompression function triggered when a minimum share of offsets is considered "long", + * aka out of cache. + * note : "long" definition seems overloaded here, sometimes meaning "wider than bitstream register", and sometimes meaning "farther than memory cache distance". + * This function will try to mitigate main memory latency through the use of prefetching */ +static size_t +ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + DEBUGLOG(5, "ZSTD_decompressSequencesLong"); +#if DYNAMIC_BMI2 + if (dctx->bmi2) { + return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + } +#endif + return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +#endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT */ + + + +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ + !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) +/* ZSTD_getLongOffsetsShare() : + * condition : offTable must be valid + * @return : "share" of long offsets (arbitrarily defined as > (1<<23)) + * compared to maximum possible of (1<<OffFSELog) */ +static unsigned +ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable) +{ + const void* ptr = offTable; + U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog; + const ZSTD_seqSymbol* table = offTable + 1; + U32 const max = 1 << tableLog; + U32 u, total = 0; + DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog); + + assert(max <= (1 << OffFSELog)); /* max not too large */ + for (u=0; u<max; u++) { + if (table[u].nbAdditionalBits > 22) total += 1; + } + + assert(tableLog <= OffFSELog); + total <<= (OffFSELog - tableLog); /* scale to OffFSELog */ + + return total; +} +#endif + +size_t +ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, const int
frame) +{ /* blockType == blockCompressed */ + const BYTE* ip = (const BYTE*)src; + /* isLongOffset must be true if there are long offsets. + * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN. + * We don't expect that to be the case in 64-bit mode. + * In block mode, window size is not known, so we have to be conservative. + * (note: but it could be evaluated from current-lowLimit) + */ + ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || (dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)))); + DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize); + + RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); + + /* Decode literals section */ + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); + if (ZSTD_isError(litCSize)) return litCSize; + ip += litCSize; + srcSize -= litCSize; + } + + /* Build Decoding Tables */ + { + /* These macros control at build-time which decompressor implementation + * we use. If neither is defined, we do some inspection and dispatch at + * runtime. + */ +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ + !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) + int usePrefetchDecoder = dctx->ddictIsCold; +#endif + int nbSeq; + size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + srcSize -= seqHSize; + + RETURN_ERROR_IF(dst == NULL && nbSeq > 0, dstSize_tooSmall, "NULL not handled"); + +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ + !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) + if ( !usePrefetchDecoder + && (!frame || (dctx->fParams.windowSize > (1<<24))) + && (nbSeq>ADVANCED_SEQS) ) { /* could probably use a larger nbSeq limit */ + U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr); + U32 const minShare = MEM_64bits() ? 
7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */ + usePrefetchDecoder = (shareLongOffsets >= minShare); + } +#endif + + dctx->ddictIsCold = 0; + +#if !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT) && \ + !defined(ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG) + if (usePrefetchDecoder) +#endif +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); +#endif + +#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG + /* else */ + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); +#endif + } +} + + +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) +{ + if (dst != dctx->previousDstEnd) { /* not contiguous */ + dctx->dictEnd = dctx->previousDstEnd; + dctx->virtualStart = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->prefixStart)); + dctx->prefixStart = dst; + dctx->previousDstEnd = dst; + } +} + + +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t dSize; + ZSTD_checkContinuity(dctx, dst); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); + dctx->previousDstEnd = (char*)dst + dSize; + return dSize; +} diff --git a/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.h b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.h new file mode 100644 index 0000000000..bf39b7350c --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_block.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + +#ifndef ZSTD_DEC_BLOCK_H +#define ZSTD_DEC_BLOCK_H + +/*-******************************************************* + * Dependencies + *********************************************************/ +#include <stddef.h> /* size_t */ +#include "../zstd.h" /* DCtx, and some public functions */ +#include "../common/zstd_internal.h" /* blockProperties_t, and some public functions */ +#include "zstd_decompress_internal.h" /* ZSTD_seqSymbol */ + + +/* === Prototypes === */ + +/* note: prototypes already published within `zstd.h` : + * ZSTD_decompressBlock() + */ + +/* note: prototypes already published within `zstd_internal.h` : + * ZSTD_getcBlockSize() + * ZSTD_decodeSeqHeaders() + */ + + +/* ZSTD_decompressBlock_internal() : + * decompress block, starting at `src`, + * into destination buffer `dst`. + * @return : decompressed block size, + * or an error code (which can be tested using ZSTD_isError()) + */ +size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, const int frame); + +/* ZSTD_buildFSETable() : + * generate FSE decoding table for one symbol (ll, ml or off) + * this function must be called with valid parameters only + * (dt is large enough, normalizedCounter distribution total is a power of 2, max is within range, etc.) + * in which case it cannot fail. + * Internal use only. + */ +void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, + const short* normalizedCounter, unsigned maxSymbolValue, + const U32* baseValue, const U32* nbAdditionalBits, + unsigned tableLog); + + +#endif /* ZSTD_DEC_BLOCK_H */ diff --git a/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_internal.h b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_internal.h new file mode 100644 index 0000000000..9ad96c5548 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/decompress/zstd_decompress_internal.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved.
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/* zstd_decompress_internal: + * objects and definitions shared within lib/decompress modules */ + + #ifndef ZSTD_DECOMPRESS_INTERNAL_H + #define ZSTD_DECOMPRESS_INTERNAL_H + + +/*-******************************************************* + * Dependencies + *********************************************************/ +#include "../common/mem.h" /* BYTE, U16, U32 */ +#include "../common/zstd_internal.h" /* ZSTD_seqSymbol */ + + + +/*-******************************************************* + * Constants + *********************************************************/ +static const U32 LL_base[MaxLL+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 20, 22, 24, 28, 32, 40, + 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, + 0x2000, 0x4000, 0x8000, 0x10000 }; + +static const U32 OF_base[MaxOff+1] = { + 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, + 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, + 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; + +static const U32 OF_bits[MaxOff+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 }; + +static const U32 ML_base[MaxML+1] = { + 3, 4, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 37, 39, 41, 43, 47, 51, 59, + 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, + 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; + + +/*-******************************************************* + * Decompression types + 
*********************************************************/ + typedef struct { + U32 fastMode; + U32 tableLog; + } ZSTD_seqSymbol_header; + + typedef struct { + U16 nextState; + BYTE nbAdditionalBits; + BYTE nbBits; + U32 baseValue; + } ZSTD_seqSymbol; + + #define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log))) + +typedef struct { + ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)]; /* Note : Space reserved for FSE Tables */ + ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)]; /* is also used as temporary workspace while building hufTable during DDict creation */ + ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)]; /* and therefore must be at least HUF_DECOMPRESS_WORKSPACE_SIZE large */ + HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + U32 rep[ZSTD_REP_NUM]; +} ZSTD_entropyDTables_t; + +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, + ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, + ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, + ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; + +typedef enum { zdss_init=0, zdss_loadHeader, + zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; + +typedef enum { + ZSTD_use_indefinitely = -1, /* Use the dictionary indefinitely */ + ZSTD_dont_use = 0, /* Do not use the dictionary (if one exists free it) */ + ZSTD_use_once = 1 /* Use the dictionary once and set to ZSTD_dont_use */ +} ZSTD_dictUses_e; + +typedef enum { + ZSTD_obm_buffered = 0, /* Buffer the output */ + ZSTD_obm_stable = 1 /* ZSTD_outBuffer is stable */ +} ZSTD_outBufferMode_e; + +struct ZSTD_DCtx_s +{ + const ZSTD_seqSymbol* LLTptr; + const ZSTD_seqSymbol* MLTptr; + const ZSTD_seqSymbol* OFTptr; + const HUF_DTable* HUFptr; + ZSTD_entropyDTables_t entropy; + U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32]; /* space needed when building huffman tables */ + const void* previousDstEnd; /* detect continuity */ + const void* prefixStart; /* start of current segment */ + const void* 
virtualStart; /* virtual start of previous segment if it was just before current one */ + const void* dictEnd; /* end of previous segment */ + size_t expected; + ZSTD_frameHeader fParams; + U64 decodedSize; + blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */ + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + XXH64_state_t xxhState; + size_t headerSize; + ZSTD_format_e format; + const BYTE* litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + size_t staticSize; + int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */ + + /* dictionary */ + ZSTD_DDict* ddictLocal; + const ZSTD_DDict* ddict; /* set by ZSTD_initDStream_usingDDict(), or ZSTD_DCtx_refDDict() */ + U32 dictID; + int ddictIsCold; /* if == 1 : dictionary is "new" for working context, and presumed "cold" (not in cpu cache) */ + ZSTD_dictUses_e dictUses; + + /* streaming */ + ZSTD_dStreamStage streamStage; + char* inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char* outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t lhSize; + void* legacyContext; + U32 previousLegacyVersion; + U32 legacyVersion; + U32 hostageByte; + int noForwardProgress; + ZSTD_outBufferMode_e outBufferMode; + ZSTD_outBuffer expectedOutBuffer; + + /* workspace */ + BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; + + size_t oversizedDuration; + +#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION + void const* dictContentBeginForFuzzing; + void const* dictContentEndForFuzzing; +#endif +}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ + + +/*-******************************************************* + * Shared internal functions + *********************************************************/ + +/*! 
ZSTD_loadDEntropy() : + * dict : must point at beginning of a valid zstd dictionary. + * @return : size of dictionary header (size of magic number + dict ID + entropy tables) */ +size_t ZSTD_loadDEntropy(ZSTD_entropyDTables_t* entropy, + const void* const dict, size_t const dictSize); + +/*! ZSTD_checkContinuity() : + * check if next `dst` follows previous position, where decompression ended. + * If yes, do nothing (continue on current segment). + * If not, classify previous segment as "external dictionary", and start a new segment. + * This function cannot fail. */ +void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst); + + +#endif /* ZSTD_DECOMPRESS_INTERNAL_H */ diff --git a/src/borg/algorithms/zstd/lib/deprecated/zbuff.h b/src/borg/algorithms/zstd/lib/deprecated/zbuff.h new file mode 100644 index 0000000000..03cb14a039 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/deprecated/zbuff.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* *************************************************************** +* NOTES/WARNINGS +******************************************************************/ +/* The streaming API defined here is deprecated. + * Consider migrating towards ZSTD_compressStream() API in `zstd.h` + * See 'lib/README.md'. 
+ *****************************************************************/ + + +#if defined (__cplusplus) +extern "C" { +#endif + +#ifndef ZSTD_BUFFERED_H_23987 +#define ZSTD_BUFFERED_H_23987 + +/* ************************************* +* Dependencies +***************************************/ +#include /* size_t */ +#include "../zstd.h" /* ZSTD_CStream, ZSTD_DStream, ZSTDLIB_API */ + + +/* *************************************************************** +* Compiler specifics +*****************************************************************/ +/* Deprecation warnings */ +/* Should these warnings be a problem, + * it is generally possible to disable them, + * typically with -Wno-deprecated-declarations for gcc + * or _CRT_SECURE_NO_WARNINGS in Visual. + * Otherwise, it's also possible to define ZBUFF_DISABLE_DEPRECATE_WARNINGS + */ +#ifdef ZBUFF_DISABLE_DEPRECATE_WARNINGS +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ +#else +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZBUFF_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API +# elif (defined(GNUC) && (GNUC > 4 || (GNUC == 4 && GNUC_MINOR >= 5))) || defined(__clang__) +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) +# elif defined(__GNUC__) && (__GNUC__ >= 3) +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZBUFF_DEPRECATED for this compiler") +# define ZBUFF_DEPRECATED(message) ZSTDLIB_API +# endif +#endif /* ZBUFF_DISABLE_DEPRECATE_WARNINGS */ + + +/* ************************************* +* Streaming functions +***************************************/ +/* This is the easier "buffered" streaming API, +* using an internal buffer to lift all restrictions on user-provided buffers +* which can be any size, any place, for 
both input and output. +* ZBUFF and ZSTD are 100% interoperable, +* frames created by one can be decoded by the other one */ + +typedef ZSTD_CStream ZBUFF_CCtx; +ZBUFF_DEPRECATED("use ZSTD_createCStream") ZBUFF_CCtx* ZBUFF_createCCtx(void); +ZBUFF_DEPRECATED("use ZSTD_freeCStream") size_t ZBUFF_freeCCtx(ZBUFF_CCtx* cctx); + +ZBUFF_DEPRECATED("use ZSTD_initCStream") size_t ZBUFF_compressInit(ZBUFF_CCtx* cctx, int compressionLevel); +ZBUFF_DEPRECATED("use ZSTD_initCStream_usingDict") size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); + +ZBUFF_DEPRECATED("use ZSTD_compressStream") size_t ZBUFF_compressContinue(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr, const void* src, size_t* srcSizePtr); +ZBUFF_DEPRECATED("use ZSTD_flushStream") size_t ZBUFF_compressFlush(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); +ZBUFF_DEPRECATED("use ZSTD_endStream") size_t ZBUFF_compressEnd(ZBUFF_CCtx* cctx, void* dst, size_t* dstCapacityPtr); + +/*-************************************************* +* Streaming compression - howto +* +* A ZBUFF_CCtx object is required to track streaming operation. +* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. +* ZBUFF_CCtx objects can be reused multiple times. +* +* Start by initializing ZBUF_CCtx. +* Use ZBUFF_compressInit() to start a new compression operation. +* Use ZBUFF_compressInitDictionary() for a compression which requires a dictionary. +* +* Use ZBUFF_compressContinue() repetitively to consume input stream. +* *srcSizePtr and *dstCapacityPtr can be any size. +* The function will report how many bytes were read or written within *srcSizePtr and *dstCapacityPtr. +* Note that it may not consume the entire input, in which case it's up to the caller to present again remaining data. +* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each call, so save its content if it matters or change @dst . 
+* @return : a hint to preferred nb of bytes to use as input for next function call (it's just a hint, to improve latency) +* or an error code, which can be tested using ZBUFF_isError(). +* +* At any moment, it's possible to flush whatever data remains within buffer, using ZBUFF_compressFlush(). +* The nb of bytes written into `dst` will be reported into *dstCapacityPtr. +* Note that the function cannot output more than *dstCapacityPtr, +* therefore, some content might still be left into internal buffer if *dstCapacityPtr is too small. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* ZBUFF_compressEnd() instructs to finish a frame. +* It will perform a flush and write frame epilogue. +* The epilogue is required for decoders to consider a frame completed. +* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. +* In which case, call again ZBUFF_compressFlush() to complete the flush. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* Hint : _recommended buffer_ sizes (not compulsory) : ZBUFF_recommendedCInSize() / ZBUFF_recommendedCOutSize() +* input : ZBUFF_recommendedCInSize==128 KB block size is the internal unit, use this value to reduce intermediate stages (better latency) +* output : ZBUFF_recommendedCOutSize==ZSTD_compressBound(128 KB) + 3 + 3 : ensures it's always possible to write/flush/end a full block. Skip some buffering. +* By using both, it ensures that input will be entirely consumed, and output will always contain the result, reducing intermediate buffering. 
+* **************************************************/ + + +typedef ZSTD_DStream ZBUFF_DCtx; +ZBUFF_DEPRECATED("use ZSTD_createDStream") ZBUFF_DCtx* ZBUFF_createDCtx(void); +ZBUFF_DEPRECATED("use ZSTD_freeDStream") size_t ZBUFF_freeDCtx(ZBUFF_DCtx* dctx); + +ZBUFF_DEPRECATED("use ZSTD_initDStream") size_t ZBUFF_decompressInit(ZBUFF_DCtx* dctx); +ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* dctx, const void* dict, size_t dictSize); + +ZBUFF_DEPRECATED("use ZSTD_decompressStream") size_t ZBUFF_decompressContinue(ZBUFF_DCtx* dctx, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr); + +/*-*************************************************************************** +* Streaming decompression howto +* +* A ZBUFF_DCtx object is required to track streaming operations. +* Use ZBUFF_createDCtx() and ZBUFF_freeDCtx() to create/release resources. +* Use ZBUFF_decompressInit() to start a new decompression operation, +* or ZBUFF_decompressInitDictionary() if decompression requires a dictionary. +* Note that ZBUFF_DCtx objects can be re-init multiple times. +* +* Use ZBUFF_decompressContinue() repetitively to consume your input. +* *srcSizePtr and *dstCapacityPtr can be any size. +* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. +* Note that it may not consume the entire input, in which case it's up to the caller to present remaining input again. +* The content of `dst` will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters, or change `dst`. +* @return : 0 when a frame is completely decoded and fully flushed, +* 1 when there is still some data left within internal buffer to flush, +* >1 when more data is expected, with value being a suggested next input size (it's just a hint, which helps latency), +* or an error code, which can be tested using ZBUFF_isError(). 
+* +* Hint : recommended buffer sizes (not compulsory) : ZBUFF_recommendedDInSize() and ZBUFF_recommendedDOutSize() +* output : ZBUFF_recommendedDOutSize== 128 KB block size is the internal unit, it ensures it's always possible to write a full block when decoded. +* input : ZBUFF_recommendedDInSize == 128KB + 3; +* just follow indications from ZBUFF_decompressContinue() to minimize latency. It should always be <= 128 KB + 3 . +* *******************************************************************************/ + + +/* ************************************* +* Tool functions +***************************************/ +ZBUFF_DEPRECATED("use ZSTD_isError") unsigned ZBUFF_isError(size_t errorCode); +ZBUFF_DEPRECATED("use ZSTD_getErrorName") const char* ZBUFF_getErrorName(size_t errorCode); + +/** Functions below provide recommended buffer sizes for Compression or Decompression operations. +* These sizes are just hints, they tend to offer better latency */ +ZBUFF_DEPRECATED("use ZSTD_CStreamInSize") size_t ZBUFF_recommendedCInSize(void); +ZBUFF_DEPRECATED("use ZSTD_CStreamOutSize") size_t ZBUFF_recommendedCOutSize(void); +ZBUFF_DEPRECATED("use ZSTD_DStreamInSize") size_t ZBUFF_recommendedDInSize(void); +ZBUFF_DEPRECATED("use ZSTD_DStreamOutSize") size_t ZBUFF_recommendedDOutSize(void); + +#endif /* ZSTD_BUFFERED_H_23987 */ + + +#ifdef ZBUFF_STATIC_LINKING_ONLY +#ifndef ZBUFF_STATIC_H_30298098432 +#define ZBUFF_STATIC_H_30298098432 + +/* ==================================================================================== + * The definitions in this section are considered experimental. + * They should never be used in association with a dynamic library, as they may change in the future. + * They are provided for advanced usages. + * Use them only in association with static linking. 
+ * ==================================================================================== */ + +/*--- Dependency ---*/ +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_parameters, ZSTD_customMem */ +#include "../zstd.h" + + +/*--- Custom memory allocator ---*/ +/*! ZBUFF_createCCtx_advanced() : + * Create a ZBUFF compression context using external alloc and free functions */ +ZBUFF_DEPRECATED("use ZSTD_createCStream_advanced") ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem); + +/*! ZBUFF_createDCtx_advanced() : + * Create a ZBUFF decompression context using external alloc and free functions */ +ZBUFF_DEPRECATED("use ZSTD_createDStream_advanced") ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem); + + +/*--- Advanced Streaming Initialization ---*/ +ZBUFF_DEPRECATED("use ZSTD_initDStream_usingDict") size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize); + + +#endif /* ZBUFF_STATIC_H_30298098432 */ +#endif /* ZBUFF_STATIC_LINKING_ONLY */ + + +#if defined (__cplusplus) +} +#endif diff --git a/src/borg/algorithms/zstd/lib/deprecated/zbuff_common.c b/src/borg/algorithms/zstd/lib/deprecated/zbuff_common.c new file mode 100644 index 0000000000..579bc4df14 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/deprecated/zbuff_common.c @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +/*-************************************* +* Dependencies +***************************************/ +#include "../common/error_private.h" +#include "zbuff.h" + +/*-**************************************** +* ZBUFF Error Management (deprecated) +******************************************/ + +/*! ZBUFF_isError() : +* tells if a return value is an error code */ +unsigned ZBUFF_isError(size_t errorCode) { return ERR_isError(errorCode); } +/*! ZBUFF_getErrorName() : +* provides error code string from function result (useful for debugging) */ +const char* ZBUFF_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } diff --git a/src/borg/algorithms/zstd/lib/deprecated/zbuff_compress.c b/src/borg/algorithms/zstd/lib/deprecated/zbuff_compress.c new file mode 100644 index 0000000000..2d20b13775 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/deprecated/zbuff_compress.c @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + + +/* ************************************* +* Dependencies +***************************************/ +#define ZBUFF_STATIC_LINKING_ONLY +#include "zbuff.h" + + +/*-*********************************************************** +* Streaming compression +* +* A ZBUFF_CCtx object is required to track streaming operation. +* Use ZBUFF_createCCtx() and ZBUFF_freeCCtx() to create/release resources. +* Use ZBUFF_compressInit() to start a new compression operation. +* ZBUFF_CCtx objects can be reused multiple times. +* +* Use ZBUFF_compressContinue() repetitively to consume your input. +* *srcSizePtr and *dstCapacityPtr can be any size. 
+* The function will report how many bytes were read or written by modifying *srcSizePtr and *dstCapacityPtr. +* Note that it may not consume the entire input, in which case it's up to the caller to call again the function with remaining input. +* The content of dst will be overwritten (up to *dstCapacityPtr) at each function call, so save its content if it matters or change dst . +* @return : a hint to preferred nb of bytes to use as input for next function call (it's only a hint, to improve latency) +* or an error code, which can be tested using ZBUFF_isError(). +* +* ZBUFF_compressFlush() can be used to instruct ZBUFF to compress and output whatever remains within its buffer. +* Note that it will not output more than *dstCapacityPtr. +* Therefore, some content might still be left into its internal buffer if dst buffer is too small. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* ZBUFF_compressEnd() instructs to finish a frame. +* It will perform a flush and write frame epilogue. +* Similar to ZBUFF_compressFlush(), it may not be able to output the entire internal buffer content if *dstCapacityPtr is too small. +* @return : nb of bytes still present into internal buffer (0 if it's empty) +* or an error code, which can be tested using ZBUFF_isError(). +* +* Hint : recommended buffer sizes (not compulsory) +* input : ZSTD_BLOCKSIZE_MAX (128 KB), internal unit size, it improves latency to use this value. +* output : ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + ZBUFF_endFrameSize : ensures it's always possible to write/flush/end a full block at best speed. 
+* ***********************************************************/ + +ZBUFF_CCtx* ZBUFF_createCCtx(void) +{ + return ZSTD_createCStream(); +} + +ZBUFF_CCtx* ZBUFF_createCCtx_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createCStream_advanced(customMem); +} + +size_t ZBUFF_freeCCtx(ZBUFF_CCtx* zbc) +{ + return ZSTD_freeCStream(zbc); +} + + +/* ====== Initialization ====== */ + +size_t ZBUFF_compressInit_advanced(ZBUFF_CCtx* zbc, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN; /* preserve "0 == unknown" behavior */ + return ZSTD_initCStream_advanced(zbc, dict, dictSize, params, pledgedSrcSize); +} + + +size_t ZBUFF_compressInitDictionary(ZBUFF_CCtx* zbc, const void* dict, size_t dictSize, int compressionLevel) +{ + return ZSTD_initCStream_usingDict(zbc, dict, dictSize, compressionLevel); +} + +size_t ZBUFF_compressInit(ZBUFF_CCtx* zbc, int compressionLevel) +{ + return ZSTD_initCStream(zbc, compressionLevel); +} + +/* ====== Compression ====== */ + + +size_t ZBUFF_compressContinue(ZBUFF_CCtx* zbc, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr) +{ + size_t result; + ZSTD_outBuffer outBuff; + ZSTD_inBuffer inBuff; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + inBuff.src = src; + inBuff.pos = 0; + inBuff.size = *srcSizePtr; + result = ZSTD_compressStream(zbc, &outBuff, &inBuff); + *dstCapacityPtr = outBuff.pos; + *srcSizePtr = inBuff.pos; + return result; +} + + + +/* ====== Finalize ====== */ + +size_t ZBUFF_compressFlush(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) +{ + size_t result; + ZSTD_outBuffer outBuff; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + result = ZSTD_flushStream(zbc, &outBuff); + *dstCapacityPtr = outBuff.pos; + return result; +} + + +size_t ZBUFF_compressEnd(ZBUFF_CCtx* zbc, void* dst, size_t* dstCapacityPtr) +{ + size_t result; + 
ZSTD_outBuffer outBuff; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + result = ZSTD_endStream(zbc, &outBuff); + *dstCapacityPtr = outBuff.pos; + return result; +} + + + +/* ************************************* +* Tool functions +***************************************/ +size_t ZBUFF_recommendedCInSize(void) { return ZSTD_CStreamInSize(); } +size_t ZBUFF_recommendedCOutSize(void) { return ZSTD_CStreamOutSize(); } diff --git a/src/borg/algorithms/zstd/lib/deprecated/zbuff_decompress.c b/src/borg/algorithms/zstd/lib/deprecated/zbuff_decompress.c new file mode 100644 index 0000000000..d3c49e84b8 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/deprecated/zbuff_decompress.c @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + + + +/* ************************************* +* Dependencies +***************************************/ +#define ZBUFF_STATIC_LINKING_ONLY +#include "zbuff.h" + + +ZBUFF_DCtx* ZBUFF_createDCtx(void) +{ + return ZSTD_createDStream(); +} + +ZBUFF_DCtx* ZBUFF_createDCtx_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createDStream_advanced(customMem); +} + +size_t ZBUFF_freeDCtx(ZBUFF_DCtx* zbd) +{ + return ZSTD_freeDStream(zbd); +} + + +/* *** Initialization *** */ + +size_t ZBUFF_decompressInitDictionary(ZBUFF_DCtx* zbd, const void* dict, size_t dictSize) +{ + return ZSTD_initDStream_usingDict(zbd, dict, dictSize); +} + +size_t ZBUFF_decompressInit(ZBUFF_DCtx* zbd) +{ + return ZSTD_initDStream(zbd); +} + + +/* *** Decompression *** */ + +size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbd, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr) +{ + ZSTD_outBuffer outBuff; + ZSTD_inBuffer inBuff; + size_t result; + outBuff.dst = dst; + outBuff.pos = 0; + outBuff.size = *dstCapacityPtr; + inBuff.src = src; + inBuff.pos = 0; + inBuff.size = *srcSizePtr; + result = ZSTD_decompressStream(zbd, &outBuff, &inBuff); + *dstCapacityPtr = outBuff.pos; + *srcSizePtr = inBuff.pos; + return result; +} + + +/* ************************************* +* Tool functions +***************************************/ +size_t ZBUFF_recommendedDInSize(void) { return ZSTD_DStreamInSize(); } +size_t ZBUFF_recommendedDOutSize(void) { return ZSTD_DStreamOutSize(); } diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/cover.c b/src/borg/algorithms/zstd/lib/dictBuilder/cover.c new file mode 100644 index 0000000000..da54ef15f2 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/dictBuilder/cover.c @@ -0,0 +1,1236 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +/* ***************************************************************************** + * Constructs a dictionary using a heuristic based on the following paper: + * + * Liao, Petri, Moffat, Wirth + * Effective Construction of Relative Lempel-Ziv Dictionaries + * Published in WWW 2016. + * + * Adapted from code originally written by @ot (Giuseppe Ottaviano). + ******************************************************************************/ + +/*-************************************* +* Dependencies +***************************************/ +#include /* fprintf */ +#include /* malloc, free, qsort */ +#include /* memset */ +#include /* clock */ + +#include "../common/mem.h" /* read */ +#include "../common/pool.h" +#include "../common/threading.h" +#include "cover.h" +#include "../common/zstd_internal.h" /* includes zstd.h */ +#ifndef ZDICT_STATIC_LINKING_ONLY +#define ZDICT_STATIC_LINKING_ONLY +#endif +#include "zdict.h" + +/*-************************************* +* Constants +***************************************/ +#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB)) +#define DEFAULT_SPLITPOINT 1.0 + +/*-************************************* +* Console display +***************************************/ +static int g_displayLevel = 2; +#define DISPLAY(...) \ + { \ + fprintf(stderr, __VA_ARGS__); \ + fflush(stderr); \ + } +#define LOCALDISPLAYLEVEL(displayLevel, l, ...) \ + if (displayLevel >= l) { \ + DISPLAY(__VA_ARGS__); \ + } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ +#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) + +#define LOCALDISPLAYUPDATE(displayLevel, l, ...) 
\ + if (displayLevel >= l) { \ + if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ + g_time = clock(); \ + DISPLAY(__VA_ARGS__); \ + } \ + } +#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) +static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; +static clock_t g_time = 0; + +/*-************************************* +* Hash table +*************************************** +* A small specialized hash map for storing activeDmers. +* The map does not resize, so if it becomes full it will loop forever. +* Thus, the map must be large enough to store every value. +* The map implements linear probing and keeps its load less than 0.5. +*/ + +#define MAP_EMPTY_VALUE ((U32)-1) +typedef struct COVER_map_pair_t_s { + U32 key; + U32 value; +} COVER_map_pair_t; + +typedef struct COVER_map_s { + COVER_map_pair_t *data; + U32 sizeLog; + U32 size; + U32 sizeMask; +} COVER_map_t; + +/** + * Clear the map. + */ +static void COVER_map_clear(COVER_map_t *map) { + memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t)); +} + +/** + * Initializes a map of the given size. + * Returns 1 on success and 0 on failure. + * The map must be destroyed with COVER_map_destroy(). + * The map is only guaranteed to be large enough to hold size elements. + */ +static int COVER_map_init(COVER_map_t *map, U32 size) { + map->sizeLog = ZSTD_highbit32(size) + 2; + map->size = (U32)1 << map->sizeLog; + map->sizeMask = map->size - 1; + map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t)); + if (!map->data) { + map->sizeLog = 0; + map->size = 0; + return 0; + } + COVER_map_clear(map); + return 1; +} + +/** + * Internal hash function + */ +static const U32 prime4bytes = 2654435761U; +static U32 COVER_map_hash(COVER_map_t *map, U32 key) { + return (key * prime4bytes) >> (32 - map->sizeLog); +} + +/** + * Helper function that returns the index that a key should be placed into. 
+ */ +static U32 COVER_map_index(COVER_map_t *map, U32 key) { + const U32 hash = COVER_map_hash(map, key); + U32 i; + for (i = hash;; i = (i + 1) & map->sizeMask) { + COVER_map_pair_t *pos = &map->data[i]; + if (pos->value == MAP_EMPTY_VALUE) { + return i; + } + if (pos->key == key) { + return i; + } + } +} + +/** + * Returns the pointer to the value for key. + * If key is not in the map, it is inserted and the value is set to 0. + * The map must not be full. + */ +static U32 *COVER_map_at(COVER_map_t *map, U32 key) { + COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)]; + if (pos->value == MAP_EMPTY_VALUE) { + pos->key = key; + pos->value = 0; + } + return &pos->value; +} + +/** + * Deletes key from the map if present. + */ +static void COVER_map_remove(COVER_map_t *map, U32 key) { + U32 i = COVER_map_index(map, key); + COVER_map_pair_t *del = &map->data[i]; + U32 shift = 1; + if (del->value == MAP_EMPTY_VALUE) { + return; + } + for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) { + COVER_map_pair_t *const pos = &map->data[i]; + /* If the position is empty we are done */ + if (pos->value == MAP_EMPTY_VALUE) { + del->value = MAP_EMPTY_VALUE; + return; + } + /* If pos can be moved to del do so */ + if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) { + del->key = pos->key; + del->value = pos->value; + del = pos; + shift = 1; + } else { + ++shift; + } + } +} + +/** + * Destroys a map that is inited with COVER_map_init(). 
+ */ +static void COVER_map_destroy(COVER_map_t *map) { + if (map->data) { + free(map->data); + } + map->data = NULL; + map->size = 0; +} + +/*-************************************* +* Context +***************************************/ + +typedef struct { + const BYTE *samples; + size_t *offsets; + const size_t *samplesSizes; + size_t nbSamples; + size_t nbTrainSamples; + size_t nbTestSamples; + U32 *suffix; + size_t suffixSize; + U32 *freqs; + U32 *dmerAt; + unsigned d; +} COVER_ctx_t; + +/* We need a global context for qsort... */ +static COVER_ctx_t *g_ctx = NULL; + +/*-************************************* +* Helper functions +***************************************/ + +/** + * Returns the sum of the sample sizes. + */ +size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) { + size_t sum = 0; + unsigned i; + for (i = 0; i < nbSamples; ++i) { + sum += samplesSizes[i]; + } + return sum; +} + +/** + * Returns -1 if the dmer at lp is less than the dmer at rp. + * Return 0 if the dmers at lp and rp are equal. + * Returns 1 if the dmer at lp is greater than the dmer at rp. + */ +static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) { + U32 const lhs = *(U32 const *)lp; + U32 const rhs = *(U32 const *)rp; + return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d); +} +/** + * Faster version for d <= 8. + */ +static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) { + U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1); + U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask; + U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask; + if (lhs < rhs) { + return -1; + } + return (lhs > rhs); +} + +/** + * Same as COVER_cmp() except ties are broken by pointer value + * NOTE: g_ctx must be set to call this function. A global is required because + * qsort doesn't take an opaque pointer. 
+ */ +static int COVER_strict_cmp(const void *lp, const void *rp) { + int result = COVER_cmp(g_ctx, lp, rp); + if (result == 0) { + result = lp < rp ? -1 : 1; + } + return result; +} +/** + * Faster version for d <= 8. + */ +static int COVER_strict_cmp8(const void *lp, const void *rp) { + int result = COVER_cmp8(g_ctx, lp, rp); + if (result == 0) { + result = lp < rp ? -1 : 1; + } + return result; +} + +/** + * Returns the first pointer in [first, last) whose element does not compare + * less than value. If no such element exists it returns last. + */ +static const size_t *COVER_lower_bound(const size_t *first, const size_t *last, + size_t value) { + size_t count = last - first; + while (count != 0) { + size_t step = count / 2; + const size_t *ptr = first; + ptr += step; + if (*ptr < value) { + first = ++ptr; + count -= step + 1; + } else { + count = step; + } + } + return first; +} + +/** + * Generic groupBy function. + * Groups an array sorted by cmp into groups with equivalent values. + * Calls grp for each group. + */ +static void +COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx, + int (*cmp)(COVER_ctx_t *, const void *, const void *), + void (*grp)(COVER_ctx_t *, const void *, const void *)) { + const BYTE *ptr = (const BYTE *)data; + size_t num = 0; + while (num < count) { + const BYTE *grpEnd = ptr + size; + ++num; + while (num < count && cmp(ctx, ptr, grpEnd) == 0) { + grpEnd += size; + ++num; + } + grp(ctx, ptr, grpEnd); + ptr = grpEnd; + } +} + +/*-************************************* +* Cover functions +***************************************/ + +/** + * Called on each group of positions with the same dmer. + * Counts the frequency of each dmer and saves it in the suffix array. + * Fills `ctx->dmerAt`. + */ +static void COVER_group(COVER_ctx_t *ctx, const void *group, + const void *groupEnd) { + /* The group consists of all the positions with the same first d bytes. 
*/ + const U32 *grpPtr = (const U32 *)group; + const U32 *grpEnd = (const U32 *)groupEnd; + /* The dmerId is how we will reference this dmer. + * This allows us to map the whole dmer space to a much smaller space, the + * size of the suffix array. + */ + const U32 dmerId = (U32)(grpPtr - ctx->suffix); + /* Count the number of samples this dmer shows up in */ + U32 freq = 0; + /* Details */ + const size_t *curOffsetPtr = ctx->offsets; + const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples; + /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a + * different sample than the last. + */ + size_t curSampleEnd = ctx->offsets[0]; + for (; grpPtr != grpEnd; ++grpPtr) { + /* Save the dmerId for this position so we can get back to it. */ + ctx->dmerAt[*grpPtr] = dmerId; + /* Dictionaries only help for the first reference to the dmer. + * After that zstd can reference the match from the previous reference. + * So only count each dmer once for each sample it is in. + */ + if (*grpPtr < curSampleEnd) { + continue; + } + freq += 1; + /* Binary search to find the end of the sample *grpPtr is in. + * In the common case that grpPtr + 1 == grpEnd we can skip the binary + * search because the loop is over. + */ + if (grpPtr + 1 != grpEnd) { + const size_t *sampleEndPtr = + COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr); + curSampleEnd = *sampleEndPtr; + curOffsetPtr = sampleEndPtr + 1; + } + } + /* At this point we are never going to look at this segment of the suffix + * array again. We take advantage of this fact to save memory. + * We store the frequency of the dmer in the first position of the group, + * which is dmerId. + */ + ctx->suffix[dmerId] = freq; +} + + +/** + * Selects the best segment in an epoch. + * Segments of are scored according to the function: + * + * Let F(d) be the frequency of dmer d. + * Let S_i be the dmer at position i of segment S which has length k. + * + * Score(S) = F(S_1) + F(S_2) + ... 
+ F(S_{k-d+1}) + * + * Once the dmer d is in the dictionary we set F(d) = 0. + */ +static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs, + COVER_map_t *activeDmers, U32 begin, + U32 end, + ZDICT_cover_params_t parameters) { + /* Constants */ + const U32 k = parameters.k; + const U32 d = parameters.d; + const U32 dmersInK = k - d + 1; + /* Try each segment (activeSegment) and save the best (bestSegment) */ + COVER_segment_t bestSegment = {0, 0, 0}; + COVER_segment_t activeSegment; + /* Reset the activeDmers in the segment */ + COVER_map_clear(activeDmers); + /* The activeSegment starts at the beginning of the epoch. */ + activeSegment.begin = begin; + activeSegment.end = begin; + activeSegment.score = 0; + /* Slide the activeSegment through the whole epoch. + * Save the best segment in bestSegment. + */ + while (activeSegment.end < end) { + /* The dmerId for the dmer at the next position */ + U32 newDmer = ctx->dmerAt[activeSegment.end]; + /* The entry in activeDmers for this dmerId */ + U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer); + /* If the dmer isn't already present in the segment add its score. */ + if (*newDmerOcc == 0) { + /* The paper suggest using the L-0.5 norm, but experiments show that it + * doesn't help. 
+ */ + activeSegment.score += freqs[newDmer]; + } + /* Add the dmer to the segment */ + activeSegment.end += 1; + *newDmerOcc += 1; + + /* If the window is now too large, drop the first position */ + if (activeSegment.end - activeSegment.begin == dmersInK + 1) { + U32 delDmer = ctx->dmerAt[activeSegment.begin]; + U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer); + activeSegment.begin += 1; + *delDmerOcc -= 1; + /* If this is the last occurrence of the dmer, subtract its score */ + if (*delDmerOcc == 0) { + COVER_map_remove(activeDmers, delDmer); + activeSegment.score -= freqs[delDmer]; + } + } + + /* If this segment is the best so far save it */ + if (activeSegment.score > bestSegment.score) { + bestSegment = activeSegment; + } + } + { + /* Trim off the zero frequency head and tail from the segment. */ + U32 newBegin = bestSegment.end; + U32 newEnd = bestSegment.begin; + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + U32 freq = freqs[ctx->dmerAt[pos]]; + if (freq != 0) { + newBegin = MIN(newBegin, pos); + newEnd = pos + 1; + } + } + bestSegment.begin = newBegin; + bestSegment.end = newEnd; + } + { + /* Zero out the frequency of each dmer covered by the chosen segment. */ + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + freqs[ctx->dmerAt[pos]] = 0; + } + } + return bestSegment; +} + +/** + * Check the validity of the parameters. + * Returns non-zero if the parameters are valid and 0 otherwise. 
+ */ +static int COVER_checkParameters(ZDICT_cover_params_t parameters, + size_t maxDictSize) { + /* k and d are required parameters */ + if (parameters.d == 0 || parameters.k == 0) { + return 0; + } + /* k <= maxDictSize */ + if (parameters.k > maxDictSize) { + return 0; + } + /* d <= k */ + if (parameters.d > parameters.k) { + return 0; + } + /* 0 < splitPoint <= 1 */ + if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){ + return 0; + } + return 1; +} + +/** + * Clean up a context initialized with `COVER_ctx_init()`. + */ +static void COVER_ctx_destroy(COVER_ctx_t *ctx) { + if (!ctx) { + return; + } + if (ctx->suffix) { + free(ctx->suffix); + ctx->suffix = NULL; + } + if (ctx->freqs) { + free(ctx->freqs); + ctx->freqs = NULL; + } + if (ctx->dmerAt) { + free(ctx->dmerAt); + ctx->dmerAt = NULL; + } + if (ctx->offsets) { + free(ctx->offsets); + ctx->offsets = NULL; + } +} + +/** + * Prepare a context for dictionary building. + * The context is only dependent on the parameter `d` and can used multiple + * times. + * Returns 0 on success or error code on error. + * The context must be destroyed with `COVER_ctx_destroy()`. + */ +static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer, + const size_t *samplesSizes, unsigned nbSamples, + unsigned d, double splitPoint) { + const BYTE *const samples = (const BYTE *)samplesBuffer; + const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); + /* Split samples into testing and training sets */ + const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; + const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; + const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; + const size_t testSamplesSize = splitPoint < 1.0 ? 
COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + /* Checks */ + if (totalSamplesSize < MAX(d, sizeof(U64)) || + totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) { + DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", + (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20)); + return ERROR(srcSize_wrong); + } + /* Check if there are at least 5 training samples */ + if (nbTrainSamples < 5) { + DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples); + return ERROR(srcSize_wrong); + } + /* Check if there's testing sample */ + if (nbTestSamples < 1) { + DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples); + return ERROR(srcSize_wrong); + } + /* Zero the context */ + memset(ctx, 0, sizeof(*ctx)); + DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, + (unsigned)trainingSamplesSize); + DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, + (unsigned)testSamplesSize); + ctx->samples = samples; + ctx->samplesSizes = samplesSizes; + ctx->nbSamples = nbSamples; + ctx->nbTrainSamples = nbTrainSamples; + ctx->nbTestSamples = nbTestSamples; + /* Partial suffix array */ + ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; + ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + /* Maps index to the dmerID */ + ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + /* The offsets of each file */ + ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t)); + if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) { + DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n"); + COVER_ctx_destroy(ctx); + return ERROR(memory_allocation); + } + ctx->freqs = NULL; + ctx->d = d; + + /* Fill offsets from the samplesSizes */ + { + U32 i; + ctx->offsets[0] = 0; + for (i = 1; i <= nbSamples; ++i) { + ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; + } + } + 
DISPLAYLEVEL(2, "Constructing partial suffix array\n"); + { + /* suffix is a partial suffix array. + * It only sorts suffixes by their first parameters.d bytes. + * The sort is stable, so each dmer group is sorted by position in input. + */ + U32 i; + for (i = 0; i < ctx->suffixSize; ++i) { + ctx->suffix[i] = i; + } + /* qsort doesn't take an opaque pointer, so pass as a global. + * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is. + */ + g_ctx = ctx; +#if defined(__OpenBSD__) + mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); +#else + qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), + (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp)); +#endif + } + DISPLAYLEVEL(2, "Computing frequencies\n"); + /* For each dmer group (group of positions with the same first d bytes): + * 1. For each position we set dmerAt[position] = dmerID. The dmerID is + * (groupBeginPtr - suffix). This allows us to go from position to + * dmerID so we can look up values in freq. + * 2. We calculate how many samples the dmer occurs in and save it in + * freqs[dmerId]. + */ + COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, + (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group); + ctx->freqs = ctx->suffix; + ctx->suffix = NULL; + return 0; +} + +void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel) +{ + const double ratio = (double)nbDmers / maxDictSize; + if (ratio >= 10) { + return; + } + LOCALDISPLAYLEVEL(displayLevel, 1, + "WARNING: The maximum dictionary size %u is too large " + "compared to the source size %u! " + "size(source)/size(dictionary) = %f, but it should be >= " + "10! This may lead to a subpar dictionary! We recommend " + "training on sources at least 10x, and preferably 100x " + "the size of the dictionary! 
\n", (U32)maxDictSize, + (U32)nbDmers, ratio); +} + +COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, + U32 nbDmers, U32 k, U32 passes) +{ + const U32 minEpochSize = k * 10; + COVER_epoch_info_t epochs; + epochs.num = MAX(1, maxDictSize / k / passes); + epochs.size = nbDmers / epochs.num; + if (epochs.size >= minEpochSize) { + assert(epochs.size * epochs.num <= nbDmers); + return epochs; + } + epochs.size = MIN(minEpochSize, nbDmers); + epochs.num = nbDmers / epochs.size; + assert(epochs.size * epochs.num <= nbDmers); + return epochs; +} + +/** + * Given the prepared context build the dictionary. + */ +static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs, + COVER_map_t *activeDmers, void *dictBuffer, + size_t dictBufferCapacity, + ZDICT_cover_params_t parameters) { + BYTE *const dict = (BYTE *)dictBuffer; + size_t tail = dictBufferCapacity; + /* Divide the data into epochs. We will select one segment from each epoch. */ + const COVER_epoch_info_t epochs = COVER_computeEpochs( + (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4); + const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3)); + size_t zeroScoreRun = 0; + size_t epoch; + DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", + (U32)epochs.num, (U32)epochs.size); + /* Loop through the epochs until there are no more segments or the dictionary + * is full. + */ + for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) { + const U32 epochBegin = (U32)(epoch * epochs.size); + const U32 epochEnd = epochBegin + epochs.size; + size_t segmentSize; + /* Select a segment */ + COVER_segment_t segment = COVER_selectSegment( + ctx, freqs, activeDmers, epochBegin, epochEnd, parameters); + /* If the segment covers no dmers, then we are out of content. + * There may be new content in other epochs, for continue for some time. 
+ */ + if (segment.score == 0) { + if (++zeroScoreRun >= maxZeroScoreRun) { + break; + } + continue; + } + zeroScoreRun = 0; + /* Trim the segment if necessary and if it is too small then we are done */ + segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); + if (segmentSize < parameters.d) { + break; + } + /* We fill the dictionary from the back to allow the best segments to be + * referenced with the smallest offsets. + */ + tail -= segmentSize; + memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); + DISPLAYUPDATE( + 2, "\r%u%% ", + (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); + } + DISPLAYLEVEL(2, "\r%79s\r", ""); + return tail; +} + +ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( + void *dictBuffer, size_t dictBufferCapacity, + const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t parameters) +{ + BYTE* const dict = (BYTE*)dictBuffer; + COVER_ctx_t ctx; + COVER_map_t activeDmers; + parameters.splitPoint = 1.0; + /* Initialize global data */ + g_displayLevel = parameters.zParams.notificationLevel; + /* Checks */ + if (!COVER_checkParameters(parameters, dictBufferCapacity)) { + DISPLAYLEVEL(1, "Cover parameters incorrect\n"); + return ERROR(parameter_outOfBound); + } + if (nbSamples == 0) { + DISPLAYLEVEL(1, "Cover must have at least one input file\n"); + return ERROR(srcSize_wrong); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + /* Initialize context and activeDmers */ + { + size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, + parameters.d, parameters.splitPoint); + if (ZSTD_isError(initVal)) { + return initVal; + } + } + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel); + if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { + DISPLAYLEVEL(1, "Failed to 
allocate dmer map: out of memory\n"); + COVER_ctx_destroy(&ctx); + return ERROR(memory_allocation); + } + + DISPLAYLEVEL(2, "Building dictionary\n"); + { + const size_t tail = + COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer, + dictBufferCapacity, parameters); + const size_t dictionarySize = ZDICT_finalizeDictionary( + dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail, + samplesBuffer, samplesSizes, nbSamples, parameters.zParams); + if (!ZSTD_isError(dictionarySize)) { + DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", + (unsigned)dictionarySize); + } + COVER_ctx_destroy(&ctx); + COVER_map_destroy(&activeDmers); + return dictionarySize; + } +} + + + +size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, + const size_t *samplesSizes, const BYTE *samples, + size_t *offsets, + size_t nbTrainSamples, size_t nbSamples, + BYTE *const dict, size_t dictBufferCapacity) { + size_t totalCompressedSize = ERROR(GENERIC); + /* Pointers */ + ZSTD_CCtx *cctx; + ZSTD_CDict *cdict; + void *dst; + /* Local variables */ + size_t dstCapacity; + size_t i; + /* Allocate dst with enough space to compress the maximum sized sample */ + { + size_t maxSampleSize = 0; + i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0; + for (; i < nbSamples; ++i) { + maxSampleSize = MAX(samplesSizes[i], maxSampleSize); + } + dstCapacity = ZSTD_compressBound(maxSampleSize); + dst = malloc(dstCapacity); + } + /* Create the cctx and cdict */ + cctx = ZSTD_createCCtx(); + cdict = ZSTD_createCDict(dict, dictBufferCapacity, + parameters.zParams.compressionLevel); + if (!dst || !cctx || !cdict) { + goto _compressCleanup; + } + /* Compress each sample and sum their sizes (or error) */ + totalCompressedSize = dictBufferCapacity; + i = parameters.splitPoint < 1.0 ? 
nbTrainSamples : 0; + for (; i < nbSamples; ++i) { + const size_t size = ZSTD_compress_usingCDict( + cctx, dst, dstCapacity, samples + offsets[i], + samplesSizes[i], cdict); + if (ZSTD_isError(size)) { + totalCompressedSize = size; + goto _compressCleanup; + } + totalCompressedSize += size; + } +_compressCleanup: + ZSTD_freeCCtx(cctx); + ZSTD_freeCDict(cdict); + if (dst) { + free(dst); + } + return totalCompressedSize; +} + + +/** + * Initialize the `COVER_best_t`. + */ +void COVER_best_init(COVER_best_t *best) { + if (best==NULL) return; /* compatible with init on NULL */ + (void)ZSTD_pthread_mutex_init(&best->mutex, NULL); + (void)ZSTD_pthread_cond_init(&best->cond, NULL); + best->liveJobs = 0; + best->dict = NULL; + best->dictSize = 0; + best->compressedSize = (size_t)-1; + memset(&best->parameters, 0, sizeof(best->parameters)); +} + +/** + * Wait until liveJobs == 0. + */ +void COVER_best_wait(COVER_best_t *best) { + if (!best) { + return; + } + ZSTD_pthread_mutex_lock(&best->mutex); + while (best->liveJobs != 0) { + ZSTD_pthread_cond_wait(&best->cond, &best->mutex); + } + ZSTD_pthread_mutex_unlock(&best->mutex); +} + +/** + * Call COVER_best_wait() and then destroy the COVER_best_t. + */ +void COVER_best_destroy(COVER_best_t *best) { + if (!best) { + return; + } + COVER_best_wait(best); + if (best->dict) { + free(best->dict); + } + ZSTD_pthread_mutex_destroy(&best->mutex); + ZSTD_pthread_cond_destroy(&best->cond); +} + +/** + * Called when a thread is about to be launched. + * Increments liveJobs. + */ +void COVER_best_start(COVER_best_t *best) { + if (!best) { + return; + } + ZSTD_pthread_mutex_lock(&best->mutex); + ++best->liveJobs; + ZSTD_pthread_mutex_unlock(&best->mutex); +} + +/** + * Called when a thread finishes executing, both on error or success. + * Decrements liveJobs and signals any waiting threads if liveJobs == 0. + * If this dictionary is the best so far save it and its parameters. 
+ */ +void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, + COVER_dictSelection_t selection) { + void* dict = selection.dictContent; + size_t compressedSize = selection.totalCompressedSize; + size_t dictSize = selection.dictSize; + if (!best) { + return; + } + { + size_t liveJobs; + ZSTD_pthread_mutex_lock(&best->mutex); + --best->liveJobs; + liveJobs = best->liveJobs; + /* If the new dictionary is better */ + if (compressedSize < best->compressedSize) { + /* Allocate space if necessary */ + if (!best->dict || best->dictSize < dictSize) { + if (best->dict) { + free(best->dict); + } + best->dict = malloc(dictSize); + if (!best->dict) { + best->compressedSize = ERROR(GENERIC); + best->dictSize = 0; + ZSTD_pthread_cond_signal(&best->cond); + ZSTD_pthread_mutex_unlock(&best->mutex); + return; + } + } + /* Save the dictionary, parameters, and size */ + if (dict) { + memcpy(best->dict, dict, dictSize); + best->dictSize = dictSize; + best->parameters = parameters; + best->compressedSize = compressedSize; + } + } + if (liveJobs == 0) { + ZSTD_pthread_cond_broadcast(&best->cond); + } + ZSTD_pthread_mutex_unlock(&best->mutex); + } +} + +COVER_dictSelection_t COVER_dictSelectionError(size_t error) { + COVER_dictSelection_t selection = { NULL, 0, error }; + return selection; +} + +unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) { + return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent); +} + +void COVER_dictSelectionFree(COVER_dictSelection_t selection){ + free(selection.dictContent); +} + +COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, + size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, + size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) { + + size_t largestDict = 0; + size_t largestCompressed = 0; + BYTE* customDictContentEnd = customDictContent + dictContentSize; + + 
BYTE * largestDictbuffer = (BYTE *)malloc(dictContentSize); + BYTE * candidateDictBuffer = (BYTE *)malloc(dictContentSize); + double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00; + + if (!largestDictbuffer || !candidateDictBuffer) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + /* Initial dictionary size and compressed size */ + memcpy(largestDictbuffer, customDictContent, dictContentSize); + dictContentSize = ZDICT_finalizeDictionary( + largestDictbuffer, dictContentSize, customDictContent, dictContentSize, + samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); + + if (ZDICT_isError(dictContentSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(dictContentSize); + } + + totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, + samplesBuffer, offsets, + nbCheckSamples, nbSamples, + largestDictbuffer, dictContentSize); + + if (ZSTD_isError(totalCompressedSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); + } + + if (params.shrinkDict == 0) { + COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; + free(candidateDictBuffer); + return selection; + } + + largestDict = dictContentSize; + largestCompressed = totalCompressedSize; + dictContentSize = ZDICT_DICTSIZE_MIN; + + /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */ + while (dictContentSize < largestDict) { + memcpy(candidateDictBuffer, largestDictbuffer, largestDict); + dictContentSize = ZDICT_finalizeDictionary( + candidateDictBuffer, dictContentSize, customDictContentEnd - dictContentSize, dictContentSize, + samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams); + + if (ZDICT_isError(dictContentSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return 
COVER_dictSelectionError(dictContentSize); + + } + + totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes, + samplesBuffer, offsets, + nbCheckSamples, nbSamples, + candidateDictBuffer, dictContentSize); + + if (ZSTD_isError(totalCompressedSize)) { + free(largestDictbuffer); + free(candidateDictBuffer); + return COVER_dictSelectionError(totalCompressedSize); + } + + if (totalCompressedSize <= largestCompressed * regressionTolerance) { + COVER_dictSelection_t selection = { candidateDictBuffer, dictContentSize, totalCompressedSize }; + free(largestDictbuffer); + return selection; + } + dictContentSize *= 2; + } + dictContentSize = largestDict; + totalCompressedSize = largestCompressed; + { + COVER_dictSelection_t selection = { largestDictbuffer, dictContentSize, totalCompressedSize }; + free(candidateDictBuffer); + return selection; + } +} + +/** + * Parameters for COVER_tryParameters(). + */ +typedef struct COVER_tryParameters_data_s { + const COVER_ctx_t *ctx; + COVER_best_t *best; + size_t dictBufferCapacity; + ZDICT_cover_params_t parameters; +} COVER_tryParameters_data_t; + +/** + * Tries a set of parameters and updates the COVER_best_t with the results. + * This function is thread safe if zstd is compiled with multithreaded support. + * It takes its parameters as an *OWNING* opaque pointer to support threading. 
+ */ +static void COVER_tryParameters(void *opaque) { + /* Save parameters as local variables */ + COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t *)opaque; + const COVER_ctx_t *const ctx = data->ctx; + const ZDICT_cover_params_t parameters = data->parameters; + size_t dictBufferCapacity = data->dictBufferCapacity; + size_t totalCompressedSize = ERROR(GENERIC); + /* Allocate space for hash table, dict, and freqs */ + COVER_map_t activeDmers; + BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); + COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); + U32 *freqs = (U32 *)malloc(ctx->suffixSize * sizeof(U32)); + if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) { + DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n"); + goto _cleanup; + } + if (!dict || !freqs) { + DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); + goto _cleanup; + } + /* Copy the frequencies because we need to modify them */ + memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32)); + /* Build the dictionary */ + { + const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict, + dictBufferCapacity, parameters); + selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail, + ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, + totalCompressedSize); + + if (COVER_dictSelectionIsError(selection)) { + DISPLAYLEVEL(1, "Failed to select dictionary\n"); + goto _cleanup; + } + } +_cleanup: + free(dict); + COVER_best_finish(data->best, parameters, selection); + free(data); + COVER_map_destroy(&activeDmers); + COVER_dictSelectionFree(selection); + if (freqs) { + free(freqs); + } +} + +ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( + void *dictBuffer, size_t dictBufferCapacity, const void *samplesBuffer, + const size_t *samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t *parameters) { + /* constants */ 
+ const unsigned nbThreads = parameters->nbThreads; + const double splitPoint = + parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; + const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; + const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; + const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; + const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; + const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; + const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); + const unsigned kIterations = + (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); + const unsigned shrinkDict = 0; + /* Local variables */ + const int displayLevel = parameters->zParams.notificationLevel; + unsigned iteration = 1; + unsigned d; + unsigned k; + COVER_best_t best; + POOL_ctx *pool = NULL; + int warned = 0; + + /* Checks */ + if (splitPoint <= 0 || splitPoint > 1) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + return ERROR(parameter_outOfBound); + } + if (kMinK < kMaxD || kMaxK < kMinK) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n"); + return ERROR(parameter_outOfBound); + } + if (nbSamples == 0) { + DISPLAYLEVEL(1, "Cover must have at least one input file\n"); + return ERROR(srcSize_wrong); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + if (nbThreads > 1) { + pool = POOL_create(nbThreads, 1); + if (!pool) { + return ERROR(memory_allocation); + } + } + /* Initialization */ + COVER_best_init(&best); + /* Turn down global display level to clean up display at level 2 and below */ + g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; + /* Loop through d first because each new value needs a new context */ + LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", + kIterations); + for (d = kMinD; d <= kMaxD; d += 2) { + /* Initialize the context for this value of d */ + COVER_ctx_t ctx; + LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + { + const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint); + if (ZSTD_isError(initVal)) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + COVER_best_destroy(&best); + POOL_free(pool); + return initVal; + } + } + if (!warned) { + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel); + warned = 1; + } + /* Loop through k reusing the same context */ + for (k = kMinK; k <= kMaxK; k += kStepSize) { + /* Prepare the arguments */ + COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc( + sizeof(COVER_tryParameters_data_t)); + LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + if (!data) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + COVER_best_destroy(&best); + COVER_ctx_destroy(&ctx); + POOL_free(pool); + return ERROR(memory_allocation); + } + data->ctx = &ctx; + data->best = &best; + data->dictBufferCapacity = dictBufferCapacity; + data->parameters = *parameters; + data->parameters.k = k; + data->parameters.d = d; + data->parameters.splitPoint = splitPoint; + data->parameters.steps = kSteps; + data->parameters.shrinkDict = shrinkDict; + data->parameters.zParams.notificationLevel = g_displayLevel; + /* Check the parameters */ + if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) { + DISPLAYLEVEL(1, "Cover parameters incorrect\n"); + free(data); + continue; + } + /* Call the function and pass ownership of data to it */ + COVER_best_start(&best); + if (pool) { + POOL_add(pool, &COVER_tryParameters, data); + } else { + COVER_tryParameters(data); + } + /* Print status */ + 
LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", + (unsigned)((iteration * 100) / kIterations)); + ++iteration; + } + COVER_best_wait(&best); + COVER_ctx_destroy(&ctx); + } + LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + /* Fill the output buffer and parameters with output of the best parameters */ + { + const size_t dictSize = best.dictSize; + if (ZSTD_isError(best.compressedSize)) { + const size_t compressedSize = best.compressedSize; + COVER_best_destroy(&best); + POOL_free(pool); + return compressedSize; + } + *parameters = best.parameters; + memcpy(dictBuffer, best.dict, dictSize); + COVER_best_destroy(&best); + POOL_free(pool); + return dictSize; + } +} diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/cover.h b/src/borg/algorithms/zstd/lib/dictBuilder/cover.h new file mode 100644 index 0000000000..f2aa0e35e3 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/dictBuilder/cover.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2017-2020, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include /* fprintf */ +#include /* malloc, free, qsort */ +#include /* memset */ +#include /* clock */ +#include "../common/mem.h" /* read */ +#include "../common/pool.h" +#include "../common/threading.h" +#include "../common/zstd_internal.h" /* includes zstd.h */ +#ifndef ZDICT_STATIC_LINKING_ONLY +#define ZDICT_STATIC_LINKING_ONLY +#endif +#include "zdict.h" + +/** + * COVER_best_t is used for two purposes: + * 1. Synchronizing threads. + * 2. Saving the best parameters and dictionary. + * + * All of the methods except COVER_best_init() are thread safe if zstd is + * compiled with multithreaded support. 
+ */ +typedef struct COVER_best_s { + ZSTD_pthread_mutex_t mutex; + ZSTD_pthread_cond_t cond; + size_t liveJobs; + void *dict; + size_t dictSize; + ZDICT_cover_params_t parameters; + size_t compressedSize; +} COVER_best_t; + +/** + * A segment is a range in the source as well as the score of the segment. + */ +typedef struct { + U32 begin; + U32 end; + U32 score; +} COVER_segment_t; + +/** + *Number of epochs and size of each epoch. + */ +typedef struct { + U32 num; + U32 size; +} COVER_epoch_info_t; + +/** + * Struct used for the dictionary selection function. + */ +typedef struct COVER_dictSelection { + BYTE* dictContent; + size_t dictSize; + size_t totalCompressedSize; +} COVER_dictSelection_t; + +/** + * Computes the number of epochs and the size of each epoch. + * We will make sure that each epoch gets at least 10 * k bytes. + * + * The COVER algorithms divide the data up into epochs of equal size and + * select one segment from each epoch. + * + * @param maxDictSize The maximum allowed dictionary size. + * @param nbDmers The number of dmers we are training on. + * @param k The parameter k (segment size). + * @param passes The target number of passes over the dmer corpus. + * More passes means a better dictionary. + */ +COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize, U32 nbDmers, + U32 k, U32 passes); + +/** + * Warns the user when their corpus is too small. + */ +void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel); + +/** + * Checks total compressed size of a dictionary + */ +size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters, + const size_t *samplesSizes, const BYTE *samples, + size_t *offsets, + size_t nbTrainSamples, size_t nbSamples, + BYTE *const dict, size_t dictBufferCapacity); + +/** + * Returns the sum of the sample sizes. + */ +size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) ; + +/** + * Initialize the `COVER_best_t`. 
+ */ +void COVER_best_init(COVER_best_t *best); + +/** + * Wait until liveJobs == 0. + */ +void COVER_best_wait(COVER_best_t *best); + +/** + * Call COVER_best_wait() and then destroy the COVER_best_t. + */ +void COVER_best_destroy(COVER_best_t *best); + +/** + * Called when a thread is about to be launched. + * Increments liveJobs. + */ +void COVER_best_start(COVER_best_t *best); + +/** + * Called when a thread finishes executing, both on error or success. + * Decrements liveJobs and signals any waiting threads if liveJobs == 0. + * If this dictionary is the best so far save it and its parameters. + */ +void COVER_best_finish(COVER_best_t *best, ZDICT_cover_params_t parameters, + COVER_dictSelection_t selection); +/** + * Error function for COVER_selectDict function. Checks if the return + * value is an error. + */ +unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection); + + /** + * Error function for COVER_selectDict function. Returns a struct where + * return.totalCompressedSize is a ZSTD error. + */ +COVER_dictSelection_t COVER_dictSelectionError(size_t error); + +/** + * Always call after selectDict is called to free up used memory from + * newly created dictionary. + */ +void COVER_dictSelectionFree(COVER_dictSelection_t selection); + +/** + * Called to finalize the dictionary and select one based on whether or not + * the shrink-dict flag was enabled. If enabled the dictionary used is the + * smallest dictionary within a specified regression of the compressed size + * from the largest dictionary. 
+ */ + COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, + size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples, + size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize); diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.c b/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.c new file mode 100644 index 0000000000..ead9220442 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.c @@ -0,0 +1,1913 @@ +/* + * divsufsort.c for libdivsufsort-lite + * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +/*- Compiler specifics -*/ +#ifdef __clang__ +#pragma clang diagnostic ignored "-Wshorten-64-to-32" +#endif + +#if defined(_MSC_VER) +# pragma warning(disable : 4244) +# pragma warning(disable : 4127) /* C4127 : Condition expression is constant */ +#endif + + +/*- Dependencies -*/ +#include +#include +#include + +#include "divsufsort.h" + +/*- Constants -*/ +#if defined(INLINE) +# undef INLINE +#endif +#if !defined(INLINE) +# define INLINE __inline +#endif +#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1) +# undef ALPHABET_SIZE +#endif +#if !defined(ALPHABET_SIZE) +# define ALPHABET_SIZE (256) +#endif +#define BUCKET_A_SIZE (ALPHABET_SIZE) +#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE) +#if defined(SS_INSERTIONSORT_THRESHOLD) +# if SS_INSERTIONSORT_THRESHOLD < 1 +# undef SS_INSERTIONSORT_THRESHOLD +# define SS_INSERTIONSORT_THRESHOLD (1) +# endif +#else +# define SS_INSERTIONSORT_THRESHOLD (8) +#endif +#if defined(SS_BLOCKSIZE) +# if SS_BLOCKSIZE < 0 +# undef SS_BLOCKSIZE +# define SS_BLOCKSIZE (0) +# elif 32768 <= SS_BLOCKSIZE +# undef SS_BLOCKSIZE +# define SS_BLOCKSIZE (32767) +# endif +#else +# define SS_BLOCKSIZE (1024) +#endif +/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */ +#if SS_BLOCKSIZE == 0 +# define SS_MISORT_STACKSIZE (96) +#elif SS_BLOCKSIZE <= 4096 +# define SS_MISORT_STACKSIZE (16) +#else +# define SS_MISORT_STACKSIZE (24) +#endif +#define SS_SMERGE_STACKSIZE (32) +#define TR_INSERTIONSORT_THRESHOLD (8) +#define TR_STACKSIZE (64) + + +/*- Macros -*/ +#ifndef SWAP +# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0) +#endif /* SWAP */ +#ifndef MIN +# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) +#endif /* MIN */ +#ifndef MAX +# define MAX(_a, _b) (((_a) > (_b)) ? 
(_a) : (_b)) +#endif /* MAX */ +#define STACK_PUSH(_a, _b, _c, _d)\ + do {\ + assert(ssize < STACK_SIZE);\ + stack[ssize].a = (_a), stack[ssize].b = (_b),\ + stack[ssize].c = (_c), stack[ssize++].d = (_d);\ + } while(0) +#define STACK_PUSH5(_a, _b, _c, _d, _e)\ + do {\ + assert(ssize < STACK_SIZE);\ + stack[ssize].a = (_a), stack[ssize].b = (_b),\ + stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\ + } while(0) +#define STACK_POP(_a, _b, _c, _d)\ + do {\ + assert(0 <= ssize);\ + if(ssize == 0) { return; }\ + (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ + (_c) = stack[ssize].c, (_d) = stack[ssize].d;\ + } while(0) +#define STACK_POP5(_a, _b, _c, _d, _e)\ + do {\ + assert(0 <= ssize);\ + if(ssize == 0) { return; }\ + (_a) = stack[--ssize].a, (_b) = stack[ssize].b,\ + (_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\ + } while(0) +#define BUCKET_A(_c0) bucket_A[(_c0)] +#if ALPHABET_SIZE == 256 +#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)]) +#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)]) +#else +#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)]) +#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)]) +#endif + + +/*- Private Functions -*/ + +static const int lg_table[256]= { + -1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, + 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 +}; + +#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) + +static INLINE +int +ss_ilg(int n) { +#if SS_BLOCKSIZE == 0 + return (n & 
0xffff0000) ? + ((n & 0xff000000) ? + 24 + lg_table[(n >> 24) & 0xff] : + 16 + lg_table[(n >> 16) & 0xff]) : + ((n & 0x0000ff00) ? + 8 + lg_table[(n >> 8) & 0xff] : + 0 + lg_table[(n >> 0) & 0xff]); +#elif SS_BLOCKSIZE < 256 + return lg_table[n]; +#else + return (n & 0xff00) ? + 8 + lg_table[(n >> 8) & 0xff] : + 0 + lg_table[(n >> 0) & 0xff]; +#endif +} + +#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ + +#if SS_BLOCKSIZE != 0 + +static const int sqq_table[256] = { + 0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61, + 64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89, + 90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109, +110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, +128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, +143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155, +156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168, +169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180, +181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191, +192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201, +202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211, +212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221, +221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230, +230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238, +239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247, +247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255 +}; + +static INLINE +int +ss_isqrt(int x) { + int y, e; + + if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; } + e = (x & 0xffff0000) ? + ((x & 0xff000000) ? 
+ 24 + lg_table[(x >> 24) & 0xff] : + 16 + lg_table[(x >> 16) & 0xff]) : + ((x & 0x0000ff00) ? + 8 + lg_table[(x >> 8) & 0xff] : + 0 + lg_table[(x >> 0) & 0xff]); + + if(e >= 16) { + y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7); + if(e >= 24) { y = (y + 1 + x / y) >> 1; } + y = (y + 1 + x / y) >> 1; + } else if(e >= 8) { + y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1; + } else { + return sqq_table[x] >> 4; + } + + return (x < (y * y)) ? y - 1 : y; +} + +#endif /* SS_BLOCKSIZE != 0 */ + + +/*---------------------------------------------------------------------------*/ + +/* Compares two suffixes. */ +static INLINE +int +ss_compare(const unsigned char *T, + const int *p1, const int *p2, + int depth) { + const unsigned char *U1, *U2, *U1n, *U2n; + + for(U1 = T + depth + *p1, + U2 = T + depth + *p2, + U1n = T + *(p1 + 1) + 2, + U2n = T + *(p2 + 1) + 2; + (U1 < U1n) && (U2 < U2n) && (*U1 == *U2); + ++U1, ++U2) { + } + + return U1 < U1n ? + (U2 < U2n ? *U1 - *U2 : 1) : + (U2 < U2n ? 
-1 : 0); +} + + +/*---------------------------------------------------------------------------*/ + +#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) + +/* Insertionsort for small size groups */ +static +void +ss_insertionsort(const unsigned char *T, const int *PA, + int *first, int *last, int depth) { + int *i, *j; + int t; + int r; + + for(i = last - 2; first <= i; --i) { + for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) { + do { *(j - 1) = *j; } while((++j < last) && (*j < 0)); + if(last <= j) { break; } + } + if(r == 0) { *j = ~*j; } + *(j - 1) = t; + } +} + +#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */ + + +/*---------------------------------------------------------------------------*/ + +#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) + +static INLINE +void +ss_fixdown(const unsigned char *Td, const int *PA, + int *SA, int i, int size) { + int j, k; + int v; + int c, d, e; + + for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { + d = Td[PA[SA[k = j++]]]; + if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; } + if(d <= c) { break; } + } + SA[i] = v; +} + +/* Simple top-down heapsort. */ +static +void +ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) { + int i, m; + int t; + + m = size; + if((size % 2) == 0) { + m--; + if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); } + } + + for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); } + if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); } + for(i = m - 1; 0 < i; --i) { + t = SA[0], SA[0] = SA[i]; + ss_fixdown(Td, PA, SA, 0, i); + SA[i] = t; + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Returns the median of three elements. 
*/ +static INLINE +int * +ss_median3(const unsigned char *Td, const int *PA, + int *v1, int *v2, int *v3) { + int *t; + if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); } + if(Td[PA[*v2]] > Td[PA[*v3]]) { + if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; } + else { return v3; } + } + return v2; +} + +/* Returns the median of five elements. */ +static INLINE +int * +ss_median5(const unsigned char *Td, const int *PA, + int *v1, int *v2, int *v3, int *v4, int *v5) { + int *t; + if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); } + if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); } + if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); } + if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); } + if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); } + if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; } + return v3; +} + +/* Returns the pivot element. */ +static INLINE +int * +ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) { + int *middle; + int t; + + t = last - first; + middle = first + t / 2; + + if(t <= 512) { + if(t <= 32) { + return ss_median3(Td, PA, first, middle, last - 1); + } else { + t >>= 2; + return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1); + } + } + t >>= 3; + first = ss_median3(Td, PA, first, first + t, first + (t << 1)); + middle = ss_median3(Td, PA, middle - t, middle, middle + t); + last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1); + return ss_median3(Td, PA, first, middle, last); +} + + +/*---------------------------------------------------------------------------*/ + +/* Binary partition for substrings. 
*/ +static INLINE +int * +ss_partition(const int *PA, + int *first, int *last, int depth) { + int *a, *b; + int t; + for(a = first - 1, b = last;;) { + for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; } + for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { } + if(b <= a) { break; } + t = ~*b; + *b = *a; + *a = t; + } + if(first < a) { *first = ~*first; } + return a; +} + +/* Multikey introsort for medium size groups. */ +static +void +ss_mintrosort(const unsigned char *T, const int *PA, + int *first, int *last, + int depth) { +#define STACK_SIZE SS_MISORT_STACKSIZE + struct { int *a, *b, c; int d; } stack[STACK_SIZE]; + const unsigned char *Td; + int *a, *b, *c, *d, *e, *f; + int s, t; + int ssize; + int limit; + int v, x = 0; + + for(ssize = 0, limit = ss_ilg(last - first);;) { + + if((last - first) <= SS_INSERTIONSORT_THRESHOLD) { +#if 1 < SS_INSERTIONSORT_THRESHOLD + if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); } +#endif + STACK_POP(first, last, depth, limit); + continue; + } + + Td = T + depth; + if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); } + if(limit < 0) { + for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) { + if((x = Td[PA[*a]]) != v) { + if(1 < (a - first)) { break; } + v = x; + first = a; + } + } + if(Td[PA[*first] - 1] < v) { + first = ss_partition(PA, first, a, depth); + } + if((a - first) <= (last - a)) { + if(1 < (a - first)) { + STACK_PUSH(a, last, depth, -1); + last = a, depth += 1, limit = ss_ilg(a - first); + } else { + first = a, limit = -1; + } + } else { + if(1 < (last - a)) { + STACK_PUSH(first, a, depth + 1, ss_ilg(a - first)); + first = a, limit = -1; + } else { + last = a, depth += 1, limit = ss_ilg(a - first); + } + } + continue; + } + + /* choose pivot */ + a = ss_pivot(Td, PA, first, last); + v = Td[PA[*a]]; + SWAP(*first, *a); + + /* partition */ + for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { } + if(((a = b) < last) && (x < v)) { + for(; (++b < 
last) && ((x = Td[PA[*b]]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + } + for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { } + if((b < (d = c)) && (x > v)) { + for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + for(; b < c;) { + SWAP(*b, *c); + for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + + if(a <= d) { + c = b - 1; + + if((s = a - first) > (t = b - a)) { s = t; } + for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + if((s = d - c) > (t = last - d - 1)) { s = t; } + for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + + a = first + (b - a), c = last - (d - c); + b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth); + + if((a - first) <= (last - c)) { + if((last - c) <= (c - b)) { + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(c, last, depth, limit); + last = a; + } else if((a - first) <= (c - b)) { + STACK_PUSH(c, last, depth, limit); + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + last = a; + } else { + STACK_PUSH(c, last, depth, limit); + STACK_PUSH(first, a, depth, limit); + first = b, last = c, depth += 1, limit = ss_ilg(c - b); + } + } else { + if((a - first) <= (c - b)) { + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + STACK_PUSH(first, a, depth, limit); + first = c; + } else if((last - c) <= (c - b)) { + STACK_PUSH(first, a, depth, limit); + STACK_PUSH(b, c, depth + 1, ss_ilg(c - b)); + first = c; + } else { + STACK_PUSH(first, a, depth, limit); + STACK_PUSH(c, last, depth, limit); + first = b, last = c, depth += 1, limit = ss_ilg(c - b); + } + } + } else { + limit += 1; + if(Td[PA[*first] - 1] < v) { + first = ss_partition(PA, first, last, depth); + limit = ss_ilg(last - first); + } + depth += 1; + } + } +#undef STACK_SIZE +} + +#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */ + + 
+/*---------------------------------------------------------------------------*/ + +#if SS_BLOCKSIZE != 0 + +static INLINE +void +ss_blockswap(int *a, int *b, int n) { + int t; + for(; 0 < n; --n, ++a, ++b) { + t = *a, *a = *b, *b = t; + } +} + +static INLINE +void +ss_rotate(int *first, int *middle, int *last) { + int *a, *b, t; + int l, r; + l = middle - first, r = last - middle; + for(; (0 < l) && (0 < r);) { + if(l == r) { ss_blockswap(first, middle, l); break; } + if(l < r) { + a = last - 1, b = middle - 1; + t = *a; + do { + *a-- = *b, *b-- = *a; + if(b < first) { + *a = t; + last = a; + if((r -= l + 1) <= l) { break; } + a -= 1, b = middle - 1; + t = *a; + } + } while(1); + } else { + a = first, b = middle; + t = *a; + do { + *a++ = *b, *b++ = *a; + if(last <= b) { + *a = t; + first = a + 1; + if((l -= r + 1) <= r) { break; } + a += 1, b = middle; + t = *a; + } + } while(1); + } + } +} + + +/*---------------------------------------------------------------------------*/ + +static +void +ss_inplacemerge(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int depth) { + const int *p; + int *a, *b; + int len, half; + int q, r; + int x; + + for(;;) { + if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); } + else { x = 0; p = PA + *(last - 1); } + for(a = first, len = middle - first, half = len >> 1, r = -1; + 0 < len; + len = half, half >>= 1) { + b = a + half; + q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth); + if(q < 0) { + a = b + 1; + half -= (len & 1) ^ 1; + } else { + r = q; + } + } + if(a < middle) { + if(r == 0) { *a = ~*a; } + ss_rotate(a, middle, last); + last -= middle - a; + middle = a; + if(first == middle) { break; } + } + --last; + if(x != 0) { while(*--last < 0) { } } + if(middle == last) { break; } + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Merge-forward with internal buffer. 
*/ +static +void +ss_mergeforward(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int *buf, int depth) { + int *a, *b, *c, *bufend; + int t; + int r; + + bufend = buf + (middle - first) - 1; + ss_blockswap(buf, first, middle - first); + + for(t = *(a = first), b = buf, c = middle;;) { + r = ss_compare(T, PA + *b, PA + *c, depth); + if(r < 0) { + do { + *a++ = *b; + if(bufend <= b) { *bufend = t; return; } + *b++ = *a; + } while(*b < 0); + } else if(r > 0) { + do { + *a++ = *c, *c++ = *a; + if(last <= c) { + while(b < bufend) { *a++ = *b, *b++ = *a; } + *a = *b, *b = t; + return; + } + } while(*c < 0); + } else { + *c = ~*c; + do { + *a++ = *b; + if(bufend <= b) { *bufend = t; return; } + *b++ = *a; + } while(*b < 0); + + do { + *a++ = *c, *c++ = *a; + if(last <= c) { + while(b < bufend) { *a++ = *b, *b++ = *a; } + *a = *b, *b = t; + return; + } + } while(*c < 0); + } + } +} + +/* Merge-backward with internal buffer. */ +static +void +ss_mergebackward(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int *buf, int depth) { + const int *p1, *p2; + int *a, *b, *c, *bufend; + int t; + int r; + int x; + + bufend = buf + (last - middle) - 1; + ss_blockswap(buf, middle, last - middle); + + x = 0; + if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; } + else { p1 = PA + *bufend; } + if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; } + else { p2 = PA + *(middle - 1); } + for(t = *(a = last - 1), b = bufend, c = middle - 1;;) { + r = ss_compare(T, p1, p2, depth); + if(0 < r) { + if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } + *a-- = *b; + if(b <= buf) { *buf = t; break; } + *b-- = *a; + if(*b < 0) { p1 = PA + ~*b; x |= 1; } + else { p1 = PA + *b; } + } else if(r < 0) { + if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } + *a-- = *c, *c-- = *a; + if(c < first) { + while(buf < b) { *a-- = *b, *b-- = *a; } + *a = *b, *b = t; + break; + } + if(*c < 0) { p2 = PA + ~*c; x |= 2; } + 
else { p2 = PA + *c; } + } else { + if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; } + *a-- = ~*b; + if(b <= buf) { *buf = t; break; } + *b-- = *a; + if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; } + *a-- = *c, *c-- = *a; + if(c < first) { + while(buf < b) { *a-- = *b, *b-- = *a; } + *a = *b, *b = t; + break; + } + if(*b < 0) { p1 = PA + ~*b; x |= 1; } + else { p1 = PA + *b; } + if(*c < 0) { p2 = PA + ~*c; x |= 2; } + else { p2 = PA + *c; } + } + } +} + +/* D&C based merge. */ +static +void +ss_swapmerge(const unsigned char *T, const int *PA, + int *first, int *middle, int *last, + int *buf, int bufsize, int depth) { +#define STACK_SIZE SS_SMERGE_STACKSIZE +#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a))) +#define MERGE_CHECK(a, b, c)\ + do {\ + if(((c) & 1) ||\ + (((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\ + *(a) = ~*(a);\ + }\ + if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\ + *(b) = ~*(b);\ + }\ + } while(0) + struct { int *a, *b, *c; int d; } stack[STACK_SIZE]; + int *l, *r, *lm, *rm; + int m, len, half; + int ssize; + int check, next; + + for(check = 0, ssize = 0;;) { + if((last - middle) <= bufsize) { + if((first < middle) && (middle < last)) { + ss_mergebackward(T, PA, first, middle, last, buf, depth); + } + MERGE_CHECK(first, last, check); + STACK_POP(first, middle, last, check); + continue; + } + + if((middle - first) <= bufsize) { + if(first < middle) { + ss_mergeforward(T, PA, first, middle, last, buf, depth); + } + MERGE_CHECK(first, last, check); + STACK_POP(first, middle, last, check); + continue; + } + + for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1; + 0 < len; + len = half, half >>= 1) { + if(ss_compare(T, PA + GETIDX(*(middle + m + half)), + PA + GETIDX(*(middle - m - half - 1)), depth) < 0) { + m += half + 1; + half -= (len & 1) ^ 1; + } + } + + if(0 < m) { + lm = middle - m, rm = middle + m; + ss_blockswap(lm, 
middle, m); + l = r = middle, next = 0; + if(rm < last) { + if(*rm < 0) { + *rm = ~*rm; + if(first < lm) { for(; *--l < 0;) { } next |= 4; } + next |= 1; + } else if(first < lm) { + for(; *r < 0; ++r) { } + next |= 2; + } + } + + if((l - first) <= (last - r)) { + STACK_PUSH(r, rm, last, (next & 3) | (check & 4)); + middle = lm, last = l, check = (check & 3) | (next & 4); + } else { + if((next & 2) && (r == middle)) { next ^= 6; } + STACK_PUSH(first, lm, l, (check & 3) | (next & 4)); + first = r, middle = rm, check = (next & 3) | (check & 4); + } + } else { + if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) { + *middle = ~*middle; + } + MERGE_CHECK(first, last, check); + STACK_POP(first, middle, last, check); + } + } +#undef STACK_SIZE +} + +#endif /* SS_BLOCKSIZE != 0 */ + + +/*---------------------------------------------------------------------------*/ + +/* Substring sort */ +static +void +sssort(const unsigned char *T, const int *PA, + int *first, int *last, + int *buf, int bufsize, + int depth, int n, int lastsuffix) { + int *a; +#if SS_BLOCKSIZE != 0 + int *b, *middle, *curbuf; + int j, k, curbufsize, limit; +#endif + int i; + + if(lastsuffix != 0) { ++first; } + +#if SS_BLOCKSIZE == 0 + ss_mintrosort(T, PA, first, last, depth); +#else + if((bufsize < SS_BLOCKSIZE) && + (bufsize < (last - first)) && + (bufsize < (limit = ss_isqrt(last - first)))) { + if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; } + buf = middle = last - limit, bufsize = limit; + } else { + middle = last, limit = 0; + } + for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) { +#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE + ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth); +#elif 1 < SS_BLOCKSIZE + ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth); +#endif + curbufsize = last - (a + SS_BLOCKSIZE); + curbuf = a + SS_BLOCKSIZE; + if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; } + for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= 
k, k <<= 1, j >>= 1) { + ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth); + } + } +#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE + ss_mintrosort(T, PA, a, middle, depth); +#elif 1 < SS_BLOCKSIZE + ss_insertionsort(T, PA, a, middle, depth); +#endif + for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) { + if(i & 1) { + ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth); + a -= k; + } + } + if(limit != 0) { +#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE + ss_mintrosort(T, PA, middle, last, depth); +#elif 1 < SS_BLOCKSIZE + ss_insertionsort(T, PA, middle, last, depth); +#endif + ss_inplacemerge(T, PA, first, middle, last, depth); + } +#endif + + if(lastsuffix != 0) { + /* Insert last type B* suffix. */ + int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2; + for(a = first, i = *(first - 1); + (a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth))); + ++a) { + *(a - 1) = *a; + } + *(a - 1) = i; + } +} + + +/*---------------------------------------------------------------------------*/ + +static INLINE +int +tr_ilg(int n) { + return (n & 0xffff0000) ? + ((n & 0xff000000) ? + 24 + lg_table[(n >> 24) & 0xff] : + 16 + lg_table[(n >> 16) & 0xff]) : + ((n & 0x0000ff00) ? + 8 + lg_table[(n >> 8) & 0xff] : + 0 + lg_table[(n >> 0) & 0xff]); +} + + +/*---------------------------------------------------------------------------*/ + +/* Simple insertionsort for small size groups. 
*/ +static +void +tr_insertionsort(const int *ISAd, int *first, int *last) { + int *a, *b; + int t, r; + + for(a = first + 1; a < last; ++a) { + for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) { + do { *(b + 1) = *b; } while((first <= --b) && (*b < 0)); + if(b < first) { break; } + } + if(r == 0) { *b = ~*b; } + *(b + 1) = t; + } +} + + +/*---------------------------------------------------------------------------*/ + +static INLINE +void +tr_fixdown(const int *ISAd, int *SA, int i, int size) { + int j, k; + int v; + int c, d, e; + + for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) { + d = ISAd[SA[k = j++]]; + if(d < (e = ISAd[SA[j]])) { k = j; d = e; } + if(d <= c) { break; } + } + SA[i] = v; +} + +/* Simple top-down heapsort. */ +static +void +tr_heapsort(const int *ISAd, int *SA, int size) { + int i, m; + int t; + + m = size; + if((size % 2) == 0) { + m--; + if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); } + } + + for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); } + if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); } + for(i = m - 1; 0 < i; --i) { + t = SA[0], SA[0] = SA[i]; + tr_fixdown(ISAd, SA, 0, i); + SA[i] = t; + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Returns the median of three elements. */ +static INLINE +int * +tr_median3(const int *ISAd, int *v1, int *v2, int *v3) { + int *t; + if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); } + if(ISAd[*v2] > ISAd[*v3]) { + if(ISAd[*v1] > ISAd[*v3]) { return v1; } + else { return v3; } + } + return v2; +} + +/* Returns the median of five elements. 
*/ +static INLINE +int * +tr_median5(const int *ISAd, + int *v1, int *v2, int *v3, int *v4, int *v5) { + int *t; + if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); } + if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); } + if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); } + if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); } + if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); } + if(ISAd[*v3] > ISAd[*v4]) { return v4; } + return v3; +} + +/* Returns the pivot element. */ +static INLINE +int * +tr_pivot(const int *ISAd, int *first, int *last) { + int *middle; + int t; + + t = last - first; + middle = first + t / 2; + + if(t <= 512) { + if(t <= 32) { + return tr_median3(ISAd, first, middle, last - 1); + } else { + t >>= 2; + return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1); + } + } + t >>= 3; + first = tr_median3(ISAd, first, first + t, first + (t << 1)); + middle = tr_median3(ISAd, middle - t, middle, middle + t); + last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1); + return tr_median3(ISAd, first, middle, last); +} + + +/*---------------------------------------------------------------------------*/ + +typedef struct _trbudget_t trbudget_t; +struct _trbudget_t { + int chance; + int remain; + int incval; + int count; +}; + +static INLINE +void +trbudget_init(trbudget_t *budget, int chance, int incval) { + budget->chance = chance; + budget->remain = budget->incval = incval; +} + +static INLINE +int +trbudget_check(trbudget_t *budget, int size) { + if(size <= budget->remain) { budget->remain -= size; return 1; } + if(budget->chance == 0) { budget->count += size; return 0; } + budget->remain += budget->incval - size; + budget->chance -= 1; + return 1; +} + + +/*---------------------------------------------------------------------------*/ + +static INLINE +void +tr_partition(const int *ISAd, + int *first, int *middle, int *last, + int **pa, int **pb, int v) { + int *a, *b, *c, *d, *e, *f; + int t, s; + int x = 0; + + for(b = middle - 1; 
(++b < last) && ((x = ISAd[*b]) == v);) { } + if(((a = b) < last) && (x < v)) { + for(; (++b < last) && ((x = ISAd[*b]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + } + for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { } + if((b < (d = c)) && (x > v)) { + for(; (b < --c) && ((x = ISAd[*c]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + for(; b < c;) { + SWAP(*b, *c); + for(; (++b < c) && ((x = ISAd[*b]) <= v);) { + if(x == v) { SWAP(*b, *a); ++a; } + } + for(; (b < --c) && ((x = ISAd[*c]) >= v);) { + if(x == v) { SWAP(*c, *d); --d; } + } + } + + if(a <= d) { + c = b - 1; + if((s = a - first) > (t = b - a)) { s = t; } + for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + if((s = d - c) > (t = last - d - 1)) { s = t; } + for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); } + first += (b - a), last -= (d - c); + } + *pa = first, *pb = last; +} + +static +void +tr_copy(int *ISA, const int *SA, + int *first, int *a, int *b, int *last, + int depth) { + /* sort suffixes of middle partition + by using sorted order of suffixes of left and right partition. 
*/ + int *c, *d, *e; + int s, v; + + v = b - SA - 1; + for(c = first, d = a - 1; c <= d; ++c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *++d = s; + ISA[s] = d - SA; + } + } + for(c = last - 1, e = d + 1, d = b; e < d; --c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *--d = s; + ISA[s] = d - SA; + } + } +} + +static +void +tr_partialcopy(int *ISA, const int *SA, + int *first, int *a, int *b, int *last, + int depth) { + int *c, *d, *e; + int s, v; + int rank, lastrank, newrank = -1; + + v = b - SA - 1; + lastrank = -1; + for(c = first, d = a - 1; c <= d; ++c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *++d = s; + rank = ISA[s + depth]; + if(lastrank != rank) { lastrank = rank; newrank = d - SA; } + ISA[s] = newrank; + } + } + + lastrank = -1; + for(e = d; first <= e; --e) { + rank = ISA[*e]; + if(lastrank != rank) { lastrank = rank; newrank = e - SA; } + if(newrank != rank) { ISA[*e] = newrank; } + } + + lastrank = -1; + for(c = last - 1, e = d + 1, d = b; e < d; --c) { + if((0 <= (s = *c - depth)) && (ISA[s] == v)) { + *--d = s; + rank = ISA[s + depth]; + if(lastrank != rank) { lastrank = rank; newrank = d - SA; } + ISA[s] = newrank; + } + } +} + +static +void +tr_introsort(int *ISA, const int *ISAd, + int *SA, int *first, int *last, + trbudget_t *budget) { +#define STACK_SIZE TR_STACKSIZE + struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE]; + int *a, *b, *c; + int t; + int v, x = 0; + int incr = ISAd - ISA; + int limit, next; + int ssize, trlink = -1; + + for(ssize = 0, limit = tr_ilg(last - first);;) { + + if(limit < 0) { + if(limit == -1) { + /* tandem repeat partition */ + tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1); + + /* update ranks */ + if(a < last) { + for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } + } + if(b < last) { + for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } + } + + /* push */ + if(1 < (b - a)) { + STACK_PUSH5(NULL, a, b, 0, 0); + STACK_PUSH5(ISAd - incr, 
first, last, -2, trlink); + trlink = ssize - 2; + } + if((a - first) <= (last - b)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink); + last = a, limit = tr_ilg(a - first); + } else if(1 < (last - b)) { + first = b, limit = tr_ilg(last - b); + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } else { + if(1 < (last - b)) { + STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink); + first = b, limit = tr_ilg(last - b); + } else if(1 < (a - first)) { + last = a, limit = tr_ilg(a - first); + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } else if(limit == -2) { + /* tandem repeat copy */ + a = stack[--ssize].b, b = stack[ssize].c; + if(stack[ssize].d == 0) { + tr_copy(ISA, SA, first, a, b, last, ISAd - ISA); + } else { + if(0 <= trlink) { stack[trlink].d = -1; } + tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA); + } + STACK_POP5(ISAd, first, last, limit, trlink); + } else { + /* sorted partition */ + if(0 <= *first) { + a = first; + do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a)); + first = a; + } + if(first < last) { + a = first; do { *a = ~*a; } while(*++a < 0); + next = (ISA[*a] != ISAd[*a]) ? 
tr_ilg(a - first + 1) : -1; + if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } } + + /* push */ + if(trbudget_check(budget, a - first)) { + if((a - first) <= (last - a)) { + STACK_PUSH5(ISAd, a, last, -3, trlink); + ISAd += incr, last = a, limit = next; + } else { + if(1 < (last - a)) { + STACK_PUSH5(ISAd + incr, first, a, next, trlink); + first = a, limit = -3; + } else { + ISAd += incr, last = a, limit = next; + } + } + } else { + if(0 <= trlink) { stack[trlink].d = -1; } + if(1 < (last - a)) { + first = a, limit = -3; + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + continue; + } + + if((last - first) <= TR_INSERTIONSORT_THRESHOLD) { + tr_insertionsort(ISAd, first, last); + limit = -3; + continue; + } + + if(limit-- == 0) { + tr_heapsort(ISAd, first, last - first); + for(a = last - 1; first < a; a = b) { + for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; } + } + limit = -3; + continue; + } + + /* choose pivot */ + a = tr_pivot(ISAd, first, last); + SWAP(*first, *a); + v = ISAd[*first]; + + /* partition */ + tr_partition(ISAd, first, first + 1, last, &a, &b, v); + if((last - first) != (b - a)) { + next = (ISA[*a] != v) ? 
tr_ilg(b - a) : -1; + + /* update ranks */ + for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; } + if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } } + + /* push */ + if((1 < (b - a)) && (trbudget_check(budget, b - a))) { + if((a - first) <= (last - b)) { + if((last - b) <= (b - a)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + STACK_PUSH5(ISAd, b, last, limit, trlink); + last = a; + } else if(1 < (last - b)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + first = b; + } else { + ISAd += incr, first = a, last = b, limit = next; + } + } else if((a - first) <= (b - a)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd, b, last, limit, trlink); + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + last = a; + } else { + STACK_PUSH5(ISAd, b, last, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } else { + STACK_PUSH5(ISAd, b, last, limit, trlink); + STACK_PUSH5(ISAd, first, a, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } else { + if((a - first) <= (b - a)) { + if(1 < (last - b)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + STACK_PUSH5(ISAd, first, a, limit, trlink); + first = b; + } else if(1 < (a - first)) { + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + last = a; + } else { + ISAd += incr, first = a, last = b, limit = next; + } + } else if((last - b) <= (b - a)) { + if(1 < (last - b)) { + STACK_PUSH5(ISAd, first, a, limit, trlink); + STACK_PUSH5(ISAd + incr, a, b, next, trlink); + first = b; + } else { + STACK_PUSH5(ISAd, first, a, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } else { + STACK_PUSH5(ISAd, first, a, limit, trlink); + STACK_PUSH5(ISAd, b, last, limit, trlink); + ISAd += incr, first = a, last = b, limit = next; + } + } + } else { + if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; } + if((a - first) <= (last - b)) { + if(1 < (a - first)) { + STACK_PUSH5(ISAd, b, last, limit, trlink); + last = a; + } 
else if(1 < (last - b)) { + first = b; + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } else { + if(1 < (last - b)) { + STACK_PUSH5(ISAd, first, a, limit, trlink); + first = b; + } else if(1 < (a - first)) { + last = a; + } else { + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } + } else { + if(trbudget_check(budget, last - first)) { + limit = tr_ilg(last - first), ISAd += incr; + } else { + if(0 <= trlink) { stack[trlink].d = -1; } + STACK_POP5(ISAd, first, last, limit, trlink); + } + } + } +#undef STACK_SIZE +} + + + +/*---------------------------------------------------------------------------*/ + +/* Tandem repeat sort */ +static +void +trsort(int *ISA, int *SA, int n, int depth) { + int *ISAd; + int *first, *last; + trbudget_t budget; + int t, skip, unsorted; + + trbudget_init(&budget, tr_ilg(n) * 2 / 3, n); +/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */ + for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) { + first = SA; + skip = 0; + unsorted = 0; + do { + if((t = *first) < 0) { first -= t; skip += t; } + else { + if(skip != 0) { *(first + skip) = skip; skip = 0; } + last = SA + ISA[t] + 1; + if(1 < (last - first)) { + budget.count = 0; + tr_introsort(ISA, ISAd, SA, first, last, &budget); + if(budget.count != 0) { unsorted += budget.count; } + else { skip = first - last; } + } else if((last - first) == 1) { + skip = -1; + } + first = last; + } + } while(first < (SA + n)); + if(skip != 0) { *(first + skip) = skip; } + if(unsorted == 0) { break; } + } +} + + +/*---------------------------------------------------------------------------*/ + +/* Sorts suffixes of type B*. */ +static +int +sort_typeBstar(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int openMP) { + int *PAb, *ISAb, *buf; +#ifdef LIBBSC_OPENMP + int *curbuf; + int l; +#endif + int i, j, k, t, m, bufsize; + int c0, c1; +#ifdef LIBBSC_OPENMP + int d0, d1; +#endif + (void)openMP; + + /* Initialize bucket arrays. 
*/ + for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; } + for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; } + + /* Count the number of occurrences of the first one or two characters of each + type A, B and B* suffix. Moreover, store the beginning position of all + type B* suffixes into the array SA. */ + for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) { + /* type A suffix. */ + do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1)); + if(0 <= i) { + /* type B* suffix. */ + ++BUCKET_BSTAR(c0, c1); + SA[--m] = i; + /* type B suffix. */ + for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { + ++BUCKET_B(c0, c1); + } + } + } + m = n - m; +/* +note: + A type B* suffix is lexicographically smaller than a type B suffix that + begins with the same first two characters. +*/ + + /* Calculate the index of start/end point of each bucket. */ + for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) { + t = i + BUCKET_A(c0); + BUCKET_A(c0) = i + j; /* start point */ + i = t + BUCKET_B(c0, c0); + for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) { + j += BUCKET_BSTAR(c0, c1); + BUCKET_BSTAR(c0, c1) = j; /* end point */ + i += BUCKET_B(c0, c1); + } + } + + if(0 < m) { + /* Sort the type B* suffixes by their first two characters. */ + PAb = SA + n - m; ISAb = SA + m; + for(i = m - 2; 0 <= i; --i) { + t = PAb[i], c0 = T[t], c1 = T[t + 1]; + SA[--BUCKET_BSTAR(c0, c1)] = i; + } + t = PAb[m - 1], c0 = T[t], c1 = T[t + 1]; + SA[--BUCKET_BSTAR(c0, c1)] = m - 1; + + /* Sort the type B* substrings using sssort. 
*/ +#ifdef LIBBSC_OPENMP + if (openMP) + { + buf = SA + m; + c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m; +#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1) + { + bufsize = (n - (2 * m)) / omp_get_num_threads(); + curbuf = buf + omp_get_thread_num() * bufsize; + k = 0; + for(;;) { + #pragma omp critical(sssort_lock) + { + if(0 < (l = j)) { + d0 = c0, d1 = c1; + do { + k = BUCKET_BSTAR(d0, d1); + if(--d1 <= d0) { + d1 = ALPHABET_SIZE - 1; + if(--d0 < 0) { break; } + } + } while(((l - k) <= 1) && (0 < (l = k))); + c0 = d0, c1 = d1, j = k; + } + } + if(l == 0) { break; } + sssort(T, PAb, SA + k, SA + l, + curbuf, bufsize, 2, n, *(SA + k) == (m - 1)); + } + } + } + else + { + buf = SA + m, bufsize = n - (2 * m); + for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { + for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { + i = BUCKET_BSTAR(c0, c1); + if(1 < (j - i)) { + sssort(T, PAb, SA + i, SA + j, + buf, bufsize, 2, n, *(SA + i) == (m - 1)); + } + } + } + } +#else + buf = SA + m, bufsize = n - (2 * m); + for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) { + for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) { + i = BUCKET_BSTAR(c0, c1); + if(1 < (j - i)) { + sssort(T, PAb, SA + i, SA + j, + buf, bufsize, 2, n, *(SA + i) == (m - 1)); + } + } + } +#endif + + /* Compute ranks of type B* substrings. */ + for(i = m - 1; 0 <= i; --i) { + if(0 <= SA[i]) { + j = i; + do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i])); + SA[i + 1] = i - j; + if(i <= 0) { break; } + } + j = i; + do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0); + ISAb[SA[i]] = j; + } + + /* Construct the inverse suffix array of type B* suffixes using trsort. */ + trsort(ISAb, SA, m, 1); + + /* Set the sorted order of tyoe B* suffixes. 
*/ + for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) { + for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { } + if(0 <= i) { + t = i; + for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { } + SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t; + } + } + + /* Calculate the index of start/end point of each bucket. */ + BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */ + for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) { + i = BUCKET_A(c0 + 1) - 1; + for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) { + t = i - BUCKET_B(c0, c1); + BUCKET_B(c0, c1) = i; /* end point */ + + /* Move all type B* suffixes to the correct position. */ + for(i = t, j = BUCKET_BSTAR(c0, c1); + j <= k; + --i, --k) { SA[i] = SA[k]; } + } + BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */ + BUCKET_B(c0, c0) = i; /* end point */ + } + } + + return m; +} + +/* Constructs the suffix array by using the sorted order of type B* suffixes. */ +static +void +construct_SA(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int m) { + int *i, *j, *k; + int s; + int c0, c1, c2; + + if(0 < m) { + /* Construct the sorted order of type B suffixes by using + the sorted order of type B* suffixes. */ + for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { + /* Scan the suffix array from right to left. */ + for(i = SA + BUCKET_BSTAR(c1, c1 + 1), + j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; + i <= j; + --j) { + if(0 < (s = *j)) { + assert(T[s] == c1); + assert(((s + 1) < n) && (T[s] <= T[s + 1])); + assert(T[s - 1] <= T[s]); + *j = ~s; + c0 = T[--s]; + if((0 < s) && (T[s - 1] > c0)) { s = ~s; } + if(c0 != c2) { + if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + k = SA + BUCKET_B(c2 = c0, c1); + } + assert(k < j); assert(k != NULL); + *k-- = s; + } else { + assert(((s == 0) && (T[s] == c1)) || (s < 0)); + *j = ~s; + } + } + } + } + + /* Construct the suffix array by using + the sorted order of type B suffixes. 
*/ + k = SA + BUCKET_A(c2 = T[n - 1]); + *k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1); + /* Scan the suffix array from left to right. */ + for(i = SA, j = SA + n; i < j; ++i) { + if(0 < (s = *i)) { + assert(T[s - 1] >= T[s]); + c0 = T[--s]; + if((s == 0) || (T[s - 1] < c0)) { s = ~s; } + if(c0 != c2) { + BUCKET_A(c2) = k - SA; + k = SA + BUCKET_A(c2 = c0); + } + assert(i < k); + *k++ = s; + } else { + assert(s < 0); + *i = ~s; + } + } +} + +/* Constructs the burrows-wheeler transformed string directly + by using the sorted order of type B* suffixes. */ +static +int +construct_BWT(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int m) { + int *i, *j, *k, *orig; + int s; + int c0, c1, c2; + + if(0 < m) { + /* Construct the sorted order of type B suffixes by using + the sorted order of type B* suffixes. */ + for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { + /* Scan the suffix array from right to left. */ + for(i = SA + BUCKET_BSTAR(c1, c1 + 1), + j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; + i <= j; + --j) { + if(0 < (s = *j)) { + assert(T[s] == c1); + assert(((s + 1) < n) && (T[s] <= T[s + 1])); + assert(T[s - 1] <= T[s]); + c0 = T[--s]; + *j = ~((int)c0); + if((0 < s) && (T[s - 1] > c0)) { s = ~s; } + if(c0 != c2) { + if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + k = SA + BUCKET_B(c2 = c0, c1); + } + assert(k < j); assert(k != NULL); + *k-- = s; + } else if(s != 0) { + *j = ~s; +#ifndef NDEBUG + } else { + assert(T[s] == c1); +#endif + } + } + } + } + + /* Construct the BWTed string by using + the sorted order of type B suffixes. */ + k = SA + BUCKET_A(c2 = T[n - 1]); + *k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1); + /* Scan the suffix array from left to right. 
*/ + for(i = SA, j = SA + n, orig = SA; i < j; ++i) { + if(0 < (s = *i)) { + assert(T[s - 1] >= T[s]); + c0 = T[--s]; + *i = c0; + if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); } + if(c0 != c2) { + BUCKET_A(c2) = k - SA; + k = SA + BUCKET_A(c2 = c0); + } + assert(i < k); + *k++ = s; + } else if(s != 0) { + *i = ~s; + } else { + orig = i; + } + } + + return orig - SA; +} + +/* Constructs the burrows-wheeler transformed string directly + by using the sorted order of type B* suffixes. */ +static +int +construct_BWT_indexes(const unsigned char *T, int *SA, + int *bucket_A, int *bucket_B, + int n, int m, + unsigned char * num_indexes, int * indexes) { + int *i, *j, *k, *orig; + int s; + int c0, c1, c2; + + int mod = n / 8; + { + mod |= mod >> 1; mod |= mod >> 2; + mod |= mod >> 4; mod |= mod >> 8; + mod |= mod >> 16; mod >>= 1; + + *num_indexes = (unsigned char)((n - 1) / (mod + 1)); + } + + if(0 < m) { + /* Construct the sorted order of type B suffixes by using + the sorted order of type B* suffixes. */ + for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) { + /* Scan the suffix array from right to left. */ + for(i = SA + BUCKET_BSTAR(c1, c1 + 1), + j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1; + i <= j; + --j) { + if(0 < (s = *j)) { + assert(T[s] == c1); + assert(((s + 1) < n) && (T[s] <= T[s + 1])); + assert(T[s - 1] <= T[s]); + + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA; + + c0 = T[--s]; + *j = ~((int)c0); + if((0 < s) && (T[s - 1] > c0)) { s = ~s; } + if(c0 != c2) { + if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; } + k = SA + BUCKET_B(c2 = c0, c1); + } + assert(k < j); assert(k != NULL); + *k-- = s; + } else if(s != 0) { + *j = ~s; +#ifndef NDEBUG + } else { + assert(T[s] == c1); +#endif + } + } + } + } + + /* Construct the BWTed string by using + the sorted order of type B suffixes. 
*/ + k = SA + BUCKET_A(c2 = T[n - 1]); + if (T[n - 2] < c2) { + if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA; + *k++ = ~((int)T[n - 2]); + } + else { + *k++ = n - 1; + } + + /* Scan the suffix array from left to right. */ + for(i = SA, j = SA + n, orig = SA; i < j; ++i) { + if(0 < (s = *i)) { + assert(T[s - 1] >= T[s]); + + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA; + + c0 = T[--s]; + *i = c0; + if(c0 != c2) { + BUCKET_A(c2) = k - SA; + k = SA + BUCKET_A(c2 = c0); + } + assert(i < k); + if((0 < s) && (T[s - 1] < c0)) { + if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA; + *k++ = ~((int)T[s - 1]); + } else + *k++ = s; + } else if(s != 0) { + *i = ~s; + } else { + orig = i; + } + } + + return orig - SA; +} + + +/*---------------------------------------------------------------------------*/ + +/*- Function -*/ + +int +divsufsort(const unsigned char *T, int *SA, int n, int openMP) { + int *bucket_A, *bucket_B; + int m; + int err = 0; + + /* Check arguments. */ + if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; } + else if(n == 0) { return 0; } + else if(n == 1) { SA[0] = 0; return 0; } + else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; } + + bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); + bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); + + /* Suffixsort. */ + if((bucket_A != NULL) && (bucket_B != NULL)) { + m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP); + construct_SA(T, SA, bucket_A, bucket_B, n, m); + } else { + err = -2; + } + + free(bucket_B); + free(bucket_A); + + return err; +} + +int +divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) { + int *B; + int *bucket_A, *bucket_B; + int m, pidx, i; + + /* Check arguments. 
*/ + if((T == NULL) || (U == NULL) || (n < 0)) { return -1; } + else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; } + + if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); } + bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int)); + bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int)); + + /* Burrows-Wheeler Transform. */ + if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) { + m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP); + + if (num_indexes == NULL || indexes == NULL) { + pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m); + } else { + pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes); + } + + /* Copy to output string. */ + U[0] = T[n - 1]; + for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; } + for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; } + pidx += 1; + } else { + pidx = -2; + } + + free(bucket_B); + free(bucket_A); + if(A == NULL) { free(B); } + + return pidx; +} diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.h b/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.h new file mode 100644 index 0000000000..5440994af1 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/dictBuilder/divsufsort.h @@ -0,0 +1,67 @@ +/* + * divsufsort.h for libdivsufsort-lite + * Copyright (c) 2003-2008 Yuta Mori All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, + * copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following + * conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef _DIVSUFSORT_H +#define _DIVSUFSORT_H 1 + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + +/*- Prototypes -*/ + +/** + * Constructs the suffix array of a given string. + * @param T [0..n-1] The input string. + * @param SA [0..n-1] The output array of suffixes. + * @param n The length of the given string. + * @param openMP enables OpenMP optimization. + * @return 0 if no error occurred, -1 or -2 otherwise. + */ +int +divsufsort(const unsigned char *T, int *SA, int n, int openMP); + +/** + * Constructs the burrows-wheeler transformed string of a given string. + * @param T [0..n-1] The input string. + * @param U [0..n-1] The output string. (can be T) + * @param A [0..n-1] The temporary array. (can be NULL) + * @param n The length of the given string. + * @param num_indexes The length of secondary indexes array. (can be NULL) + * @param indexes The secondary indexes array. (can be NULL) + * @param openMP enables OpenMP optimization. + * @return The primary index if no error occurred, -1 or -2 otherwise. 
+ */
+int
+divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+#endif /* _DIVSUFSORT_H */
diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/fastcover.c b/src/borg/algorithms/zstd/lib/dictBuilder/fastcover.c
new file mode 100644
index 0000000000..485c333b54
--- /dev/null
+++ b/src/borg/algorithms/zstd/lib/dictBuilder/fastcover.c
@@ -0,0 +1,757 @@
+/*
+ * Copyright (c) 2018-2020, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/*-*************************************
+* Dependencies
+***************************************/
+#include <stdio.h>  /* fprintf */
+#include <stdlib.h> /* malloc, free, qsort */
+#include <string.h> /* memset */
+#include <time.h>   /* clock */
+
+#include "../common/mem.h" /* read */
+#include "../common/pool.h"
+#include "../common/threading.h"
+#include "cover.h"
+#include "../common/zstd_internal.h" /* includes zstd.h */
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+
+
+/*-*************************************
+* Constants
+***************************************/
+#define FASTCOVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
+#define FASTCOVER_MAX_F 31
+#define FASTCOVER_MAX_ACCEL 10
+#define DEFAULT_SPLITPOINT 0.75
+#define DEFAULT_F 20
+#define DEFAULT_ACCEL 1
+
+
+/*-*************************************
+* Console display
+***************************************/
+static int g_displayLevel = 2;
+#define DISPLAY(...) \
+ { \
+ fprintf(stderr, __VA_ARGS__); \
+ fflush(stderr); \
+ }
+#define LOCALDISPLAYLEVEL(displayLevel, l, ...)
\ + if (displayLevel >= l) { \ + DISPLAY(__VA_ARGS__); \ + } /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */ +#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__) + +#define LOCALDISPLAYUPDATE(displayLevel, l, ...) \ + if (displayLevel >= l) { \ + if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) { \ + g_time = clock(); \ + DISPLAY(__VA_ARGS__); \ + } \ + } +#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__) +static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100; +static clock_t g_time = 0; + + +/*-************************************* +* Hash Functions +***************************************/ +static const U64 prime6bytes = 227718039650203ULL; +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } + +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; +static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } + + +/** + * Hash the d-byte value pointed to by p and mod 2^f + */ +static size_t FASTCOVER_hashPtrToIndex(const void* p, U32 h, unsigned d) { + if (d == 6) { + return ZSTD_hash6Ptr(p, h) & ((1 << h) - 1); + } + return ZSTD_hash8Ptr(p, h) & ((1 << h) - 1); +} + + +/*-************************************* +* Acceleration +***************************************/ +typedef struct { + unsigned finalize; /* Percentage of training samples used for ZDICT_finalizeDictionary */ + unsigned skip; /* Number of dmer skipped between each dmer counted in computeFrequency */ +} FASTCOVER_accel_t; + + +static const FASTCOVER_accel_t FASTCOVER_defaultAccelParameters[FASTCOVER_MAX_ACCEL+1] = { + { 100, 0 }, /* accel = 0, should not happen because accel = 0 defaults to accel = 1 */ + { 100, 0 }, /* accel = 1 */ + { 50, 
1 }, /* accel = 2 */ + { 34, 2 }, /* accel = 3 */ + { 25, 3 }, /* accel = 4 */ + { 20, 4 }, /* accel = 5 */ + { 17, 5 }, /* accel = 6 */ + { 14, 6 }, /* accel = 7 */ + { 13, 7 }, /* accel = 8 */ + { 11, 8 }, /* accel = 9 */ + { 10, 9 }, /* accel = 10 */ +}; + + +/*-************************************* +* Context +***************************************/ +typedef struct { + const BYTE *samples; + size_t *offsets; + const size_t *samplesSizes; + size_t nbSamples; + size_t nbTrainSamples; + size_t nbTestSamples; + size_t nbDmers; + U32 *freqs; + unsigned d; + unsigned f; + FASTCOVER_accel_t accelParams; +} FASTCOVER_ctx_t; + + +/*-************************************* +* Helper functions +***************************************/ +/** + * Selects the best segment in an epoch. + * Segments of are scored according to the function: + * + * Let F(d) be the frequency of all dmers with hash value d. + * Let S_i be hash value of the dmer at position i of segment S which has length k. + * + * Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1}) + * + * Once the dmer with hash value d is in the dictionary we set F(d) = 0. + */ +static COVER_segment_t FASTCOVER_selectSegment(const FASTCOVER_ctx_t *ctx, + U32 *freqs, U32 begin, U32 end, + ZDICT_cover_params_t parameters, + U16* segmentFreqs) { + /* Constants */ + const U32 k = parameters.k; + const U32 d = parameters.d; + const U32 f = ctx->f; + const U32 dmersInK = k - d + 1; + + /* Try each segment (activeSegment) and save the best (bestSegment) */ + COVER_segment_t bestSegment = {0, 0, 0}; + COVER_segment_t activeSegment; + + /* Reset the activeDmers in the segment */ + /* The activeSegment starts at the beginning of the epoch. */ + activeSegment.begin = begin; + activeSegment.end = begin; + activeSegment.score = 0; + + /* Slide the activeSegment through the whole epoch. + * Save the best segment in bestSegment. 
+ */ + while (activeSegment.end < end) { + /* Get hash value of current dmer */ + const size_t idx = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.end, f, d); + + /* Add frequency of this index to score if this is the first occurrence of index in active segment */ + if (segmentFreqs[idx] == 0) { + activeSegment.score += freqs[idx]; + } + /* Increment end of segment and segmentFreqs*/ + activeSegment.end += 1; + segmentFreqs[idx] += 1; + /* If the window is now too large, drop the first position */ + if (activeSegment.end - activeSegment.begin == dmersInK + 1) { + /* Get hash value of the dmer to be eliminated from active segment */ + const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + segmentFreqs[delIndex] -= 1; + /* Subtract frequency of this index from score if this is the last occurrence of this index in active segment */ + if (segmentFreqs[delIndex] == 0) { + activeSegment.score -= freqs[delIndex]; + } + /* Increment start of segment */ + activeSegment.begin += 1; + } + + /* If this segment is the best so far save it */ + if (activeSegment.score > bestSegment.score) { + bestSegment = activeSegment; + } + } + + /* Zero out rest of segmentFreqs array */ + while (activeSegment.begin < end) { + const size_t delIndex = FASTCOVER_hashPtrToIndex(ctx->samples + activeSegment.begin, f, d); + segmentFreqs[delIndex] -= 1; + activeSegment.begin += 1; + } + + { + /* Zero the frequency of hash value of each dmer covered by the chosen segment. 
*/ + U32 pos; + for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) { + const size_t i = FASTCOVER_hashPtrToIndex(ctx->samples + pos, f, d); + freqs[i] = 0; + } + } + + return bestSegment; +} + + +static int FASTCOVER_checkParameters(ZDICT_cover_params_t parameters, + size_t maxDictSize, unsigned f, + unsigned accel) { + /* k, d, and f are required parameters */ + if (parameters.d == 0 || parameters.k == 0) { + return 0; + } + /* d has to be 6 or 8 */ + if (parameters.d != 6 && parameters.d != 8) { + return 0; + } + /* k <= maxDictSize */ + if (parameters.k > maxDictSize) { + return 0; + } + /* d <= k */ + if (parameters.d > parameters.k) { + return 0; + } + /* 0 < f <= FASTCOVER_MAX_F*/ + if (f > FASTCOVER_MAX_F || f == 0) { + return 0; + } + /* 0 < splitPoint <= 1 */ + if (parameters.splitPoint <= 0 || parameters.splitPoint > 1) { + return 0; + } + /* 0 < accel <= 10 */ + if (accel > 10 || accel == 0) { + return 0; + } + return 1; +} + + +/** + * Clean up a context initialized with `FASTCOVER_ctx_init()`. 
+ */ +static void +FASTCOVER_ctx_destroy(FASTCOVER_ctx_t* ctx) +{ + if (!ctx) return; + + free(ctx->freqs); + ctx->freqs = NULL; + + free(ctx->offsets); + ctx->offsets = NULL; +} + + +/** + * Calculate for frequency of hash value of each dmer in ctx->samples + */ +static void +FASTCOVER_computeFrequency(U32* freqs, const FASTCOVER_ctx_t* ctx) +{ + const unsigned f = ctx->f; + const unsigned d = ctx->d; + const unsigned skip = ctx->accelParams.skip; + const unsigned readLength = MAX(d, 8); + size_t i; + assert(ctx->nbTrainSamples >= 5); + assert(ctx->nbTrainSamples <= ctx->nbSamples); + for (i = 0; i < ctx->nbTrainSamples; i++) { + size_t start = ctx->offsets[i]; /* start of current dmer */ + size_t const currSampleEnd = ctx->offsets[i+1]; + while (start + readLength <= currSampleEnd) { + const size_t dmerIndex = FASTCOVER_hashPtrToIndex(ctx->samples + start, f, d); + freqs[dmerIndex]++; + start = start + skip + 1; + } + } +} + + +/** + * Prepare a context for dictionary building. + * The context is only dependent on the parameter `d` and can used multiple + * times. + * Returns 0 on success or error code on error. + * The context must be destroyed with `FASTCOVER_ctx_destroy()`. + */ +static size_t +FASTCOVER_ctx_init(FASTCOVER_ctx_t* ctx, + const void* samplesBuffer, + const size_t* samplesSizes, unsigned nbSamples, + unsigned d, double splitPoint, unsigned f, + FASTCOVER_accel_t accelParams) +{ + const BYTE* const samples = (const BYTE*)samplesBuffer; + const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples); + /* Split samples into testing and training sets */ + const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples; + const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples; + const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize; + const size_t testSamplesSize = splitPoint < 1.0 ? 
COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize; + + /* Checks */ + if (totalSamplesSize < MAX(d, sizeof(U64)) || + totalSamplesSize >= (size_t)FASTCOVER_MAX_SAMPLES_SIZE) { + DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n", + (unsigned)(totalSamplesSize >> 20), (FASTCOVER_MAX_SAMPLES_SIZE >> 20)); + return ERROR(srcSize_wrong); + } + + /* Check if there are at least 5 training samples */ + if (nbTrainSamples < 5) { + DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid\n", nbTrainSamples); + return ERROR(srcSize_wrong); + } + + /* Check if there's testing sample */ + if (nbTestSamples < 1) { + DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.\n", nbTestSamples); + return ERROR(srcSize_wrong); + } + + /* Zero the context */ + memset(ctx, 0, sizeof(*ctx)); + DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples, + (unsigned)trainingSamplesSize); + DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples, + (unsigned)testSamplesSize); + + ctx->samples = samples; + ctx->samplesSizes = samplesSizes; + ctx->nbSamples = nbSamples; + ctx->nbTrainSamples = nbTrainSamples; + ctx->nbTestSamples = nbTestSamples; + ctx->nbDmers = trainingSamplesSize - MAX(d, sizeof(U64)) + 1; + ctx->d = d; + ctx->f = f; + ctx->accelParams = accelParams; + + /* The offsets of each file */ + ctx->offsets = (size_t*)calloc((nbSamples + 1), sizeof(size_t)); + if (ctx->offsets == NULL) { + DISPLAYLEVEL(1, "Failed to allocate scratch buffers \n"); + FASTCOVER_ctx_destroy(ctx); + return ERROR(memory_allocation); + } + + /* Fill offsets from the samplesSizes */ + { U32 i; + ctx->offsets[0] = 0; + assert(nbSamples >= 5); + for (i = 1; i <= nbSamples; ++i) { + ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1]; + } + } + + /* Initialize frequency array of size 2^f */ + ctx->freqs = (U32*)calloc(((U64)1 << f), sizeof(U32)); + if (ctx->freqs == NULL) { + 
DISPLAYLEVEL(1, "Failed to allocate frequency table \n"); + FASTCOVER_ctx_destroy(ctx); + return ERROR(memory_allocation); + } + + DISPLAYLEVEL(2, "Computing frequencies\n"); + FASTCOVER_computeFrequency(ctx->freqs, ctx); + + return 0; +} + + +/** + * Given the prepared context build the dictionary. + */ +static size_t +FASTCOVER_buildDictionary(const FASTCOVER_ctx_t* ctx, + U32* freqs, + void* dictBuffer, size_t dictBufferCapacity, + ZDICT_cover_params_t parameters, + U16* segmentFreqs) +{ + BYTE *const dict = (BYTE *)dictBuffer; + size_t tail = dictBufferCapacity; + /* Divide the data into epochs. We will select one segment from each epoch. */ + const COVER_epoch_info_t epochs = COVER_computeEpochs( + (U32)dictBufferCapacity, (U32)ctx->nbDmers, parameters.k, 1); + const size_t maxZeroScoreRun = 10; + size_t zeroScoreRun = 0; + size_t epoch; + DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n", + (U32)epochs.num, (U32)epochs.size); + /* Loop through the epochs until there are no more segments or the dictionary + * is full. + */ + for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) { + const U32 epochBegin = (U32)(epoch * epochs.size); + const U32 epochEnd = epochBegin + epochs.size; + size_t segmentSize; + /* Select a segment */ + COVER_segment_t segment = FASTCOVER_selectSegment( + ctx, freqs, epochBegin, epochEnd, parameters, segmentFreqs); + + /* If the segment covers no dmers, then we are out of content. + * There may be new content in other epochs, for continue for some time. + */ + if (segment.score == 0) { + if (++zeroScoreRun >= maxZeroScoreRun) { + break; + } + continue; + } + zeroScoreRun = 0; + + /* Trim the segment if necessary and if it is too small then we are done */ + segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail); + if (segmentSize < parameters.d) { + break; + } + + /* We fill the dictionary from the back to allow the best segments to be + * referenced with the smallest offsets. 
+ */ + tail -= segmentSize; + memcpy(dict + tail, ctx->samples + segment.begin, segmentSize); + DISPLAYUPDATE( + 2, "\r%u%% ", + (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity)); + } + DISPLAYLEVEL(2, "\r%79s\r", ""); + return tail; +} + +/** + * Parameters for FASTCOVER_tryParameters(). + */ +typedef struct FASTCOVER_tryParameters_data_s { + const FASTCOVER_ctx_t* ctx; + COVER_best_t* best; + size_t dictBufferCapacity; + ZDICT_cover_params_t parameters; +} FASTCOVER_tryParameters_data_t; + + +/** + * Tries a set of parameters and updates the COVER_best_t with the results. + * This function is thread safe if zstd is compiled with multithreaded support. + * It takes its parameters as an *OWNING* opaque pointer to support threading. + */ +static void FASTCOVER_tryParameters(void *opaque) +{ + /* Save parameters as local variables */ + FASTCOVER_tryParameters_data_t *const data = (FASTCOVER_tryParameters_data_t *)opaque; + const FASTCOVER_ctx_t *const ctx = data->ctx; + const ZDICT_cover_params_t parameters = data->parameters; + size_t dictBufferCapacity = data->dictBufferCapacity; + size_t totalCompressedSize = ERROR(GENERIC); + /* Initialize array to keep track of frequency of dmer within activeSegment */ + U16* segmentFreqs = (U16 *)calloc(((U64)1 << ctx->f), sizeof(U16)); + /* Allocate space for hash table, dict, and freqs */ + BYTE *const dict = (BYTE * const)malloc(dictBufferCapacity); + COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC)); + U32 *freqs = (U32*) malloc(((U64)1 << ctx->f) * sizeof(U32)); + if (!segmentFreqs || !dict || !freqs) { + DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n"); + goto _cleanup; + } + /* Copy the frequencies because we need to modify them */ + memcpy(freqs, ctx->freqs, ((U64)1 << ctx->f) * sizeof(U32)); + /* Build the dictionary */ + { const size_t tail = FASTCOVER_buildDictionary(ctx, freqs, dict, dictBufferCapacity, + parameters, segmentFreqs); + + const unsigned 
nbFinalizeSamples = (unsigned)(ctx->nbTrainSamples * ctx->accelParams.finalize / 100); + selection = COVER_selectDict(dict + tail, dictBufferCapacity - tail, + ctx->samples, ctx->samplesSizes, nbFinalizeSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets, + totalCompressedSize); + + if (COVER_dictSelectionIsError(selection)) { + DISPLAYLEVEL(1, "Failed to select dictionary\n"); + goto _cleanup; + } + } +_cleanup: + free(dict); + COVER_best_finish(data->best, parameters, selection); + free(data); + free(segmentFreqs); + COVER_dictSelectionFree(selection); + free(freqs); +} + + +static void +FASTCOVER_convertToCoverParams(ZDICT_fastCover_params_t fastCoverParams, + ZDICT_cover_params_t* coverParams) +{ + coverParams->k = fastCoverParams.k; + coverParams->d = fastCoverParams.d; + coverParams->steps = fastCoverParams.steps; + coverParams->nbThreads = fastCoverParams.nbThreads; + coverParams->splitPoint = fastCoverParams.splitPoint; + coverParams->zParams = fastCoverParams.zParams; + coverParams->shrinkDict = fastCoverParams.shrinkDict; +} + + +static void +FASTCOVER_convertToFastCoverParams(ZDICT_cover_params_t coverParams, + ZDICT_fastCover_params_t* fastCoverParams, + unsigned f, unsigned accel) +{ + fastCoverParams->k = coverParams.k; + fastCoverParams->d = coverParams.d; + fastCoverParams->steps = coverParams.steps; + fastCoverParams->nbThreads = coverParams.nbThreads; + fastCoverParams->splitPoint = coverParams.splitPoint; + fastCoverParams->f = f; + fastCoverParams->accel = accel; + fastCoverParams->zParams = coverParams.zParams; + fastCoverParams->shrinkDict = coverParams.shrinkDict; +} + + +ZDICTLIB_API size_t +ZDICT_trainFromBuffer_fastCover(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, + const size_t* samplesSizes, unsigned nbSamples, + ZDICT_fastCover_params_t parameters) +{ + BYTE* const dict = (BYTE*)dictBuffer; + FASTCOVER_ctx_t ctx; + ZDICT_cover_params_t coverParams; + FASTCOVER_accel_t accelParams; + /* 
Initialize global data */ + g_displayLevel = parameters.zParams.notificationLevel; + /* Assign splitPoint and f if not provided */ + parameters.splitPoint = 1.0; + parameters.f = parameters.f == 0 ? DEFAULT_F : parameters.f; + parameters.accel = parameters.accel == 0 ? DEFAULT_ACCEL : parameters.accel; + /* Convert to cover parameter */ + memset(&coverParams, 0 , sizeof(coverParams)); + FASTCOVER_convertToCoverParams(parameters, &coverParams); + /* Checks */ + if (!FASTCOVER_checkParameters(coverParams, dictBufferCapacity, parameters.f, + parameters.accel)) { + DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); + return ERROR(parameter_outOfBound); + } + if (nbSamples == 0) { + DISPLAYLEVEL(1, "FASTCOVER must have at least one input file\n"); + return ERROR(srcSize_wrong); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + /* Assign corresponding FASTCOVER_accel_t to accelParams*/ + accelParams = FASTCOVER_defaultAccelParameters[parameters.accel]; + /* Initialize context */ + { + size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, + coverParams.d, parameters.splitPoint, parameters.f, + accelParams); + if (ZSTD_isError(initVal)) { + DISPLAYLEVEL(1, "Failed to initialize context\n"); + return initVal; + } + } + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, g_displayLevel); + /* Build the dictionary */ + DISPLAYLEVEL(2, "Building dictionary\n"); + { + /* Initialize array to keep track of frequency of dmer within activeSegment */ + U16* segmentFreqs = (U16 *)calloc(((U64)1 << parameters.f), sizeof(U16)); + const size_t tail = FASTCOVER_buildDictionary(&ctx, ctx.freqs, dictBuffer, + dictBufferCapacity, coverParams, segmentFreqs); + const unsigned nbFinalizeSamples = (unsigned)(ctx.nbTrainSamples * ctx.accelParams.finalize / 100); + const size_t dictionarySize = ZDICT_finalizeDictionary( + dict, 
dictBufferCapacity, dict + tail, dictBufferCapacity - tail, + samplesBuffer, samplesSizes, nbFinalizeSamples, coverParams.zParams); + if (!ZSTD_isError(dictionarySize)) { + DISPLAYLEVEL(2, "Constructed dictionary of size %u\n", + (unsigned)dictionarySize); + } + FASTCOVER_ctx_destroy(&ctx); + free(segmentFreqs); + return dictionarySize; + } +} + + +ZDICTLIB_API size_t +ZDICT_optimizeTrainFromBuffer_fastCover( + void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, + const size_t* samplesSizes, unsigned nbSamples, + ZDICT_fastCover_params_t* parameters) +{ + ZDICT_cover_params_t coverParams; + FASTCOVER_accel_t accelParams; + /* constants */ + const unsigned nbThreads = parameters->nbThreads; + const double splitPoint = + parameters->splitPoint <= 0.0 ? DEFAULT_SPLITPOINT : parameters->splitPoint; + const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d; + const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d; + const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k; + const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k; + const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps; + const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1); + const unsigned kIterations = + (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize); + const unsigned f = parameters->f == 0 ? DEFAULT_F : parameters->f; + const unsigned accel = parameters->accel == 0 ? 
DEFAULT_ACCEL : parameters->accel; + const unsigned shrinkDict = 0; + /* Local variables */ + const int displayLevel = parameters->zParams.notificationLevel; + unsigned iteration = 1; + unsigned d; + unsigned k; + COVER_best_t best; + POOL_ctx *pool = NULL; + int warned = 0; + /* Checks */ + if (splitPoint <= 0 || splitPoint > 1) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect splitPoint\n"); + return ERROR(parameter_outOfBound); + } + if (accel == 0 || accel > FASTCOVER_MAX_ACCEL) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect accel\n"); + return ERROR(parameter_outOfBound); + } + if (kMinK < kMaxD || kMaxK < kMinK) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect k\n"); + return ERROR(parameter_outOfBound); + } + if (nbSamples == 0) { + LOCALDISPLAYLEVEL(displayLevel, 1, "FASTCOVER must have at least one input file\n"); + return ERROR(srcSize_wrong); + } + if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) { + LOCALDISPLAYLEVEL(displayLevel, 1, "dictBufferCapacity must be at least %u\n", + ZDICT_DICTSIZE_MIN); + return ERROR(dstSize_tooSmall); + } + if (nbThreads > 1) { + pool = POOL_create(nbThreads, 1); + if (!pool) { + return ERROR(memory_allocation); + } + } + /* Initialization */ + COVER_best_init(&best); + memset(&coverParams, 0 , sizeof(coverParams)); + FASTCOVER_convertToCoverParams(*parameters, &coverParams); + accelParams = FASTCOVER_defaultAccelParameters[accel]; + /* Turn down global display level to clean up display at level 2 and below */ + g_displayLevel = displayLevel == 0 ? 
0 : displayLevel - 1; + /* Loop through d first because each new value needs a new context */ + LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n", + kIterations); + for (d = kMinD; d <= kMaxD; d += 2) { + /* Initialize the context for this value of d */ + FASTCOVER_ctx_t ctx; + LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d); + { + size_t const initVal = FASTCOVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint, f, accelParams); + if (ZSTD_isError(initVal)) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n"); + COVER_best_destroy(&best); + POOL_free(pool); + return initVal; + } + } + if (!warned) { + COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.nbDmers, displayLevel); + warned = 1; + } + /* Loop through k reusing the same context */ + for (k = kMinK; k <= kMaxK; k += kStepSize) { + /* Prepare the arguments */ + FASTCOVER_tryParameters_data_t *data = (FASTCOVER_tryParameters_data_t *)malloc( + sizeof(FASTCOVER_tryParameters_data_t)); + LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k); + if (!data) { + LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n"); + COVER_best_destroy(&best); + FASTCOVER_ctx_destroy(&ctx); + POOL_free(pool); + return ERROR(memory_allocation); + } + data->ctx = &ctx; + data->best = &best; + data->dictBufferCapacity = dictBufferCapacity; + data->parameters = coverParams; + data->parameters.k = k; + data->parameters.d = d; + data->parameters.splitPoint = splitPoint; + data->parameters.steps = kSteps; + data->parameters.shrinkDict = shrinkDict; + data->parameters.zParams.notificationLevel = g_displayLevel; + /* Check the parameters */ + if (!FASTCOVER_checkParameters(data->parameters, dictBufferCapacity, + data->ctx->f, accel)) { + DISPLAYLEVEL(1, "FASTCOVER parameters incorrect\n"); + free(data); + continue; + } + /* Call the function and pass ownership of data to it */ + COVER_best_start(&best); + if (pool) { + POOL_add(pool, &FASTCOVER_tryParameters, 
data); + } else { + FASTCOVER_tryParameters(data); + } + /* Print status */ + LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%% ", + (unsigned)((iteration * 100) / kIterations)); + ++iteration; + } + COVER_best_wait(&best); + FASTCOVER_ctx_destroy(&ctx); + } + LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", ""); + /* Fill the output buffer and parameters with output of the best parameters */ + { + const size_t dictSize = best.dictSize; + if (ZSTD_isError(best.compressedSize)) { + const size_t compressedSize = best.compressedSize; + COVER_best_destroy(&best); + POOL_free(pool); + return compressedSize; + } + FASTCOVER_convertToFastCoverParams(best.parameters, parameters, f, accel); + memcpy(dictBuffer, best.dict, dictSize); + COVER_best_destroy(&best); + POOL_free(pool); + return dictSize; + } + +} diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/zdict.c b/src/borg/algorithms/zstd/lib/dictBuilder/zdict.c new file mode 100644 index 0000000000..6d0b04231b --- /dev/null +++ b/src/borg/algorithms/zstd/lib/dictBuilder/zdict.c @@ -0,0 +1,1135 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */
+
+
+/*-**************************************
+*  Tuning parameters
+****************************************/
+#define MINRATIO 4   /* minimum nb of apparition to be selected in dictionary */
+#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
+#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
+
+
+/*-**************************************
+*  Compiler Options
+****************************************/
+/* Unix Large Files support (>4GB) */
+#define _FILE_OFFSET_BITS 64
+#if (defined(__sun__) && (!defined(__LP64__)))   /* Sun Solaris 32-bits requires specific definitions */
+#  define _LARGEFILE_SOURCE
+#elif ! defined(__LP64__)                        /* No point defining Large file for 64 bit */
+#  define _LARGEFILE64_SOURCE
+#endif
+
+
+/*-*************************************
+*  Dependencies
+***************************************/
+#include <stdlib.h>        /* malloc, free */
+#include <string.h>        /* memset */
+#include <stdio.h>         /* fprintf, fopen, ftello64 */
+#include <time.h>          /* clock */
+
+#include "../common/mem.h"           /* read */
+#include "../common/fse.h"           /* FSE_normalizeCount, FSE_writeNCount */
+#define HUF_STATIC_LINKING_ONLY
+#include "../common/huf.h"           /* HUF_buildCTable, HUF_writeCTable */
+#include "../common/zstd_internal.h" /* includes zstd.h */
+#include "../common/xxhash.h"        /* XXH64 */
+#include "divsufsort.h"
+#ifndef ZDICT_STATIC_LINKING_ONLY
+#  define ZDICT_STATIC_LINKING_ONLY
+#endif
+#include "zdict.h"
+#include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */
+
+
+/*-*************************************
+*  Constants
+***************************************/
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define DICTLISTSIZE_DEFAULT 10000
+
+#define NOISELENGTH 32
+
+static const int g_compressionLevel_default = 3;
+static const U32 g_selectivity_default = 9;
+
+
+/*-*************************************
+*  Console display
+***************************************/
+#define DISPLAY(...) 
{ fprintf(stderr, __VA_ARGS__); fflush( stderr ); }
+#define DISPLAYLEVEL(l, ...) if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); }    /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
+
+static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }
+
+static void ZDICT_printHex(const void* ptr, size_t length)
+{
+    const BYTE* const b = (const BYTE*)ptr;
+    size_t u;
+    for (u=0; u<length; u++) {
+        BYTE c = b[u];
+        if (c<32 || c>126) c = '.';   /* non-printable char */
+        DISPLAY("%c", c);
+    }
+}
+
+
+/*-********************************************************
+*  Helper functions
+**********************************************************/
+unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }
+
+const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
+
+unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)
+{
+    if (dictSize < 8) return 0;
+    if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;
+    return MEM_readLE32((const char*)dictBuffer + 4);
+}
+
+size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)
+{
+    size_t headerSize;
+    if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);
+
+    {   unsigned offcodeMaxValue = MaxOff;
+        ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));
+        U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);
+        short* offcodeNCount = (short*)malloc((MaxOff+1)*sizeof(short));
+        if (!bs || !wksp || !offcodeNCount) {
+            headerSize = ERROR(memory_allocation);
+        } else {
+            ZSTD_reset_compressedBlockState(bs);
+            headerSize = ZSTD_loadCEntropy(bs, wksp, offcodeNCount, &offcodeMaxValue, dictBuffer, dictSize);
+        }
+
+        free(bs);
+        free(wksp);
+        free(offcodeNCount);
+    }
+
+    return headerSize;
+}
+
+/*-********************************************************
+*  Dictionary training functions
+**********************************************************/
+static unsigned 
ZDICT_NbCommonBytes (size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanForward64( &r, (U64)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r=0; + _BitScanForward( &r, (U32)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} 
+ + +/*! ZDICT_count() : + Count the nb of common bytes between 2 pointers. + Note : this function presumes end of buffer followed by noisy guard band. +*/ +static size_t ZDICT_count(const void* pIn, const void* pMatch) +{ + const char* const pStart = (const char*)pIn; + for (;;) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { + pIn = (const char*)pIn+sizeof(size_t); + pMatch = (const char*)pMatch+sizeof(size_t); + continue; + } + pIn = (const char*)pIn+ZDICT_NbCommonBytes(diff); + return (size_t)((const char*)pIn - pStart); + } +} + + +typedef struct { + U32 pos; + U32 length; + U32 savings; +} dictItem; + +static void ZDICT_initDictItem(dictItem* d) +{ + d->pos = 1; + d->length = 0; + d->savings = (U32)(-1); +} + + +#define LLIMIT 64 /* heuristic determined experimentally */ +#define MINMATCHLENGTH 7 /* heuristic determined experimentally */ +static dictItem ZDICT_analyzePos( + BYTE* doneMarks, + const int* suffix, U32 start, + const void* buffer, U32 minRatio, U32 notificationLevel) +{ + U32 lengthList[LLIMIT] = {0}; + U32 cumulLength[LLIMIT] = {0}; + U32 savings[LLIMIT] = {0}; + const BYTE* b = (const BYTE*)buffer; + size_t maxLength = LLIMIT; + size_t pos = suffix[start]; + U32 end = start; + dictItem solution; + + /* init */ + memset(&solution, 0, sizeof(solution)); + doneMarks[pos] = 1; + + /* trivial repetition cases */ + if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2)) + ||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3)) + ||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) { + /* skip and mark segment */ + U16 const pattern16 = MEM_read16(b+pos+4); + U32 u, patternEnd = 6; + while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ; + if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++; + for (u=1; u= MINMATCHLENGTH); + } + + /* look backward */ + { size_t length; + do { + length = ZDICT_count(b + pos, b + *(suffix+start-1)); + if (length >=MINMATCHLENGTH) start--; + } while(length >= MINMATCHLENGTH); + } + + /* exit 
if not found a minimum nb of repetitions */ + if (end-start < minRatio) { + U32 idx; + for(idx=start; idx= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos); + DISPLAYLEVEL(4, "\n"); + + for (mml = MINMATCHLENGTH ; ; mml++) { + BYTE currentChar = 0; + U32 currentCount = 0; + U32 currentID = refinedStart; + U32 id; + U32 selectedCount = 0; + U32 selectedID = currentID; + for (id =refinedStart; id < refinedEnd; id++) { + if (b[suffix[id] + mml] != currentChar) { + if (currentCount > selectedCount) { + selectedCount = currentCount; + selectedID = currentID; + } + currentID = id; + currentChar = b[ suffix[id] + mml]; + currentCount = 0; + } + currentCount ++; + } + if (currentCount > selectedCount) { /* for last */ + selectedCount = currentCount; + selectedID = currentID; + } + + if (selectedCount < minRatio) + break; + refinedStart = selectedID; + refinedEnd = refinedStart + selectedCount; + } + + /* evaluate gain based on new dict */ + start = refinedStart; + pos = suffix[refinedStart]; + end = start; + memset(lengthList, 0, sizeof(lengthList)); + + /* look forward */ + { size_t length; + do { + end++; + length = ZDICT_count(b + pos, b + suffix[end]); + if (length >= LLIMIT) length = LLIMIT-1; + lengthList[length]++; + } while (length >=MINMATCHLENGTH); + } + + /* look backward */ + { size_t length = MINMATCHLENGTH; + while ((length >= MINMATCHLENGTH) & (start > 0)) { + length = ZDICT_count(b + pos, b + suffix[start - 1]); + if (length >= LLIMIT) length = LLIMIT - 1; + lengthList[length]++; + if (length >= MINMATCHLENGTH) start--; + } + } + + /* largest useful length */ + memset(cumulLength, 0, sizeof(cumulLength)); + cumulLength[maxLength-1] = lengthList[maxLength-1]; + for (i=(int)(maxLength-2); i>=0; i--) + cumulLength[i] = cumulLength[i+1] + lengthList[i]; + + for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break; + maxLength = i; + + /* reduce maxLength in case of final into repetitive data */ + { U32 l = 
(U32)maxLength; + BYTE const c = b[pos + maxLength-1]; + while (b[pos+l-2]==c) l--; + maxLength = l; + } + if (maxLength < MINMATCHLENGTH) return solution; /* skip : no long-enough solution */ + + /* calculate savings */ + savings[5] = 0; + for (i=MINMATCHLENGTH; i<=(int)maxLength; i++) + savings[i] = savings[i-1] + (lengthList[i] * (i-3)); + + DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n", + (unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / maxLength); + + solution.pos = (U32)pos; + solution.length = (U32)maxLength; + solution.savings = savings[maxLength]; + + /* mark positions done */ + { U32 id; + for (id=start; id solution.length) length = solution.length; + } + pEnd = (U32)(testedPos + length); + for (p=testedPos; ppos; + const U32 eltEnd = elt.pos + elt.length; + const char* const buf = (const char*) buffer; + + /* tail overlap */ + U32 u; for (u=1; u elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */ + /* append */ + U32 const addedLength = table[u].pos - elt.pos; + table[u].length += addedLength; + table[u].pos = elt.pos; + table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ + table[u].savings += elt.length / 8; /* rough approx bonus */ + elt = table[u]; + /* sort : improve rank */ + while ((u>1) && (table[u-1].savings < elt.savings)) + table[u] = table[u-1], u--; + table[u] = elt; + return u; + } } + + /* front overlap */ + for (u=1; u= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */ + /* append */ + int const addedLength = (int)eltEnd - (table[u].pos + table[u].length); + table[u].savings += elt.length / 8; /* rough approx bonus */ + if (addedLength > 0) { /* otherwise, elt fully included into existing */ + table[u].length += addedLength; + table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */ + } + /* sort : improve rank */ + elt = table[u]; + while ((u>1) && 
(table[u-1].savings < elt.savings)) + table[u] = table[u-1], u--; + table[u] = elt; + return u; + } + + if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) { + if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) { + size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 ); + table[u].pos = elt.pos; + table[u].savings += (U32)(elt.savings * addedLength / elt.length); + table[u].length = MIN(elt.length, table[u].length + 1); + return u; + } + } + } + + return 0; +} + + +static void ZDICT_removeDictItem(dictItem* table, U32 id) +{ + /* convention : table[0].pos stores nb of elts */ + U32 const max = table[0].pos; + U32 u; + if (!id) return; /* protection, should never happen */ + for (u=id; upos--; +} + + +static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer) +{ + /* merge if possible */ + U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer); + if (mergeId) { + U32 newMerge = 1; + while (newMerge) { + newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer); + if (newMerge) ZDICT_removeDictItem(table, mergeId); + mergeId = newMerge; + } + return; + } + + /* insert */ + { U32 current; + U32 nextElt = table->pos; + if (nextElt >= maxSize) nextElt = maxSize-1; + current = nextElt-1; + while (table[current].savings < elt.savings) { + table[current+1] = table[current]; + current--; + } + table[current+1] = elt; + table->pos = nextElt+1; + } +} + + +static U32 ZDICT_dictSize(const dictItem* dictList) +{ + U32 u, dictSize = 0; + for (u=1; u=l) { \ + if (ZDICT_clockSpan(displayClock) > refreshRate) \ + { displayClock = clock(); DISPLAY(__VA_ARGS__); \ + if (notificationLevel>=4) fflush(stderr); } } + + /* init */ + DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ + if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) { + result = ERROR(memory_allocation); + goto _cleanup; + } + if (minRatio < MINRATIO) minRatio = MINRATIO; + memset(doneMarks, 0, 
bufferSize+16); + + /* limit sample set size (divsufsort limitation)*/ + if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20)); + while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles]; + + /* sort */ + DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20)); + { int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0); + if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; } + } + suffix[bufferSize] = (int)bufferSize; /* leads into noise */ + suffix0[0] = (int)bufferSize; /* leads into noise */ + /* build reverse suffix sort */ + { size_t pos; + for (pos=0; pos < bufferSize; pos++) + reverseSuffix[suffix[pos]] = (U32)pos; + /* note filePos tracks borders between samples. + It's not used at this stage, but planned to become useful in a later update */ + filePos[0] = 0; + for (pos=1; pos> 21); + } +} + + +typedef struct +{ + ZSTD_CDict* dict; /* dictionary */ + ZSTD_CCtx* zc; /* working context */ + void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */ +} EStats_ress_t; + +#define MAXREPOFFSET 1024 + +static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params, + unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets, + const void* src, size_t srcSize, + U32 notificationLevel) +{ + size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog); + size_t cSize; + + if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */ + { size_t const errorCode = ZSTD_compressBegin_usingCDict(esr.zc, esr.dict); + if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; } + + } + cSize = ZSTD_compressBlock(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize); + if 
(ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not compress sample size %u \n", (unsigned)srcSize); return; } + + if (cSize) { /* if == 0; block is not compressible */ + const seqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc); + + /* literals stats */ + { const BYTE* bytePtr; + for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++) + countLit[*bytePtr]++; + } + + /* seqStats */ + { U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + ZSTD_seqToCodes(seqStorePtr); + + { const BYTE* codePtr = seqStorePtr->ofCode; + U32 u; + for (u=0; umlCode; + U32 u; + for (u=0; ullCode; + U32 u; + for (u=0; u= 2) { /* rep offsets */ + const seqDef* const seq = seqStorePtr->sequencesStart; + U32 offset1 = seq[0].offset - 3; + U32 offset2 = seq[1].offset - 3; + if (offset1 >= MAXREPOFFSET) offset1 = 0; + if (offset2 >= MAXREPOFFSET) offset2 = 0; + repOffsets[offset1] += 3; + repOffsets[offset2] += 1; + } } } +} + +static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles) +{ + size_t total=0; + unsigned u; + for (u=0; u0; u--) { + offsetCount_t tmp; + if (table[u-1].count >= table[u].count) break; + tmp = table[u-1]; + table[u-1] = table[u]; + table[u] = tmp; + } +} + +/* ZDICT_flatLit() : + * rewrite `countLit` to contain a mostly flat but still compressible distribution of literals. + * necessary to avoid generating a non-compressible distribution that HUF_writeCTable() cannot encode. 
+ */ +static void ZDICT_flatLit(unsigned* countLit) +{ + int u; + for (u=1; u<256; u++) countLit[u] = 2; + countLit[0] = 4; + countLit[253] = 1; + countLit[254] = 1; +} + +#define OFFCODE_MAX 30 /* only applicable to first block */ +static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize, + unsigned compressionLevel, + const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles, + const void* dictBuffer, size_t dictBufferSize, + unsigned notificationLevel) +{ + unsigned countLit[256]; + HUF_CREATE_STATIC_CTABLE(hufTable, 255); + unsigned offcodeCount[OFFCODE_MAX+1]; + short offcodeNCount[OFFCODE_MAX+1]; + U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB)); + unsigned matchLengthCount[MaxML+1]; + short matchLengthNCount[MaxML+1]; + unsigned litLengthCount[MaxLL+1]; + short litLengthNCount[MaxLL+1]; + U32 repOffset[MAXREPOFFSET]; + offsetCount_t bestRepOffset[ZSTD_REP_NUM+1]; + EStats_ress_t esr = { NULL, NULL, NULL }; + ZSTD_parameters params; + U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total; + size_t pos = 0, errorCode; + size_t eSize = 0; + size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles); + size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles); + BYTE* dstPtr = (BYTE*)dstBuffer; + + /* init */ + DEBUGLOG(4, "ZDICT_analyzeEntropy"); + if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */ + for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */ + for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1; + for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1; + for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1; + memset(repOffset, 0, sizeof(repOffset)); + repOffset[1] = repOffset[4] = repOffset[8] = 1; + memset(bestRepOffset, 0, sizeof(bestRepOffset)); + if (compressionLevel==0) compressionLevel = g_compressionLevel_default; + params = ZSTD_getParams(compressionLevel, averageSampleSize, 
dictBufferSize); + + esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem); + esr.zc = ZSTD_createCCtx(); + esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX); + if (!esr.dict || !esr.zc || !esr.workPlace) { + eSize = ERROR(memory_allocation); + DISPLAYLEVEL(1, "Not enough memory \n"); + goto _cleanup; + } + + /* collect stats on all samples */ + for (u=0; u dictBufferCapacity) dictContentSize = dictBufferCapacity - hSize; + { size_t const dictSize = hSize + dictContentSize; + char* dictEnd = (char*)dictBuffer + dictSize; + memmove(dictEnd - dictContentSize, customDictContent, dictContentSize); + memcpy(dictBuffer, header, hSize); + return dictSize; + } +} + + +static size_t ZDICT_addEntropyTablesFromBuffer_advanced( + void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_params_t params) +{ + int const compressionLevel = (params.compressionLevel == 0) ? g_compressionLevel_default : params.compressionLevel; + U32 const notificationLevel = params.notificationLevel; + size_t hSize = 8; + + /* calculate entropy tables */ + DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */ + DISPLAYLEVEL(2, "statistics ... \n"); + { size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize, + compressionLevel, + samplesBuffer, samplesSizes, nbSamples, + (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, + notificationLevel); + if (ZDICT_isError(eSize)) return eSize; + hSize += eSize; + } + + /* add dictionary header (after entropy tables) */ + MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY); + { U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0); + U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768; + U32 const dictID = params.dictID ? 
params.dictID : compliantID; + MEM_writeLE32((char*)dictBuffer+4, dictID); + } + + if (hSize + dictContentSize < dictBufferCapacity) + memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize); + return MIN(dictBufferCapacity, hSize+dictContentSize); +} + +/* Hidden declaration for dbio.c */ +size_t ZDICT_trainFromBuffer_unsafe_legacy( + void* dictBuffer, size_t maxDictSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t params); +/*! ZDICT_trainFromBuffer_unsafe_legacy() : +* Warning : `samplesBuffer` must be followed by noisy guard band. +* @return : size of dictionary, or an error code which can be tested with ZDICT_isError() +*/ +size_t ZDICT_trainFromBuffer_unsafe_legacy( + void* dictBuffer, size_t maxDictSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t params) +{ + U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16)); + dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList)); + unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel; + unsigned const minRep = (selectivity > 30) ? 
MINRATIO : nbSamples >> selectivity; + size_t const targetDictSize = maxDictSize; + size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); + size_t dictSize = 0; + U32 const notificationLevel = params.zParams.notificationLevel; + + /* checks */ + if (!dictList) return ERROR(memory_allocation); + if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */ + if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */ + + /* init */ + ZDICT_initDictItem(dictList); + + /* build dictionary */ + ZDICT_trainBuffer_legacy(dictList, dictListSize, + samplesBuffer, samplesBuffSize, + samplesSizes, nbSamples, + minRep, notificationLevel); + + /* display best matches */ + if (params.zParams.notificationLevel>= 3) { + unsigned const nb = MIN(25, dictList[0].pos); + unsigned const dictContentSize = ZDICT_dictSize(dictList); + unsigned u; + DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize); + DISPLAYLEVEL(3, "list %u best segments \n", nb-1); + for (u=1; u samplesBuffSize) || ((pos + length) > samplesBuffSize)) { + free(dictList); + return ERROR(GENERIC); /* should never happen */ + } + DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |", + u, length, pos, (unsigned)dictList[u].savings); + ZDICT_printHex((const char*)samplesBuffer+pos, printedLength); + DISPLAYLEVEL(3, "| \n"); + } } + + + /* create dictionary */ + { unsigned dictContentSize = ZDICT_dictSize(dictList); + if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */ + if (dictContentSize < targetDictSize/4) { + DISPLAYLEVEL(2, "! 
warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize); + if (samplesBuffSize < 10 * targetDictSize) + DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20)); + if (minRep > MINRATIO) { + DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1); + DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n"); + } + } + + if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) { + unsigned proposedSelectivity = selectivity-1; + while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; } + DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize); + DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity); + DISPLAYLEVEL(2, "! 
always test dictionary efficiency on real samples \n"); + } + + /* limit dictionary size */ + { U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */ + U32 currentSize = 0; + U32 n; for (n=1; n targetDictSize) { currentSize -= dictList[n].length; break; } + } + dictList->pos = n; + dictContentSize = currentSize; + } + + /* build dict content */ + { U32 u; + BYTE* ptr = (BYTE*)dictBuffer + maxDictSize; + for (u=1; upos; u++) { + U32 l = dictList[u].length; + ptr -= l; + if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */ + memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l); + } } + + dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize, + samplesBuffer, samplesSizes, nbSamples, + params.zParams); + } + + /* clean up */ + free(dictList); + return dictSize; +} + + +/* ZDICT_trainFromBuffer_legacy() : + * issue : samplesBuffer need to be followed by a noisy guard band. + * work around : duplicate the buffer, and add the noise */ +size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t params) +{ + size_t result; + void* newBuff; + size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples); + if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */ + + newBuff = malloc(sBuffSize + NOISELENGTH); + if (!newBuff) return ERROR(memory_allocation); + + memcpy(newBuff, samplesBuffer, sBuffSize); + ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */ + + result = + ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff, + samplesSizes, nbSamples, params); + free(newBuff); + return result; +} + + +size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* 
samplesSizes, unsigned nbSamples) +{ + ZDICT_fastCover_params_t params; + DEBUGLOG(3, "ZDICT_trainFromBuffer"); + memset(¶ms, 0, sizeof(params)); + params.d = 8; + params.steps = 4; + /* Default to level 6 since no compression level information is available */ + params.zParams.compressionLevel = 3; +#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1) + params.zParams.notificationLevel = DEBUGLEVEL; +#endif + return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity, + samplesBuffer, samplesSizes, nbSamples, + ¶ms); +} + +size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples) +{ + ZDICT_params_t params; + memset(¶ms, 0, sizeof(params)); + return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity, + samplesBuffer, samplesSizes, nbSamples, + params); +} diff --git a/src/borg/algorithms/zstd/lib/dictBuilder/zdict.h b/src/borg/algorithms/zstd/lib/dictBuilder/zdict.h new file mode 100644 index 0000000000..ff2e77faf7 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/dictBuilder/zdict.h @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef DICTBUILDER_H_001 +#define DICTBUILDER_H_001 + +#if defined (__cplusplus) +extern "C" { +#endif + + +/*====== Dependencies ======*/ +#include /* size_t */ + + +/* ===== ZDICTLIB_API : control library symbols visibility ===== */ +#ifndef ZDICTLIB_VISIBILITY +# if defined(__GNUC__) && (__GNUC__ >= 4) +# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default"))) +# else +# define ZDICTLIB_VISIBILITY +# endif +#endif +#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY +#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +#else +# define ZDICTLIB_API ZDICTLIB_VISIBILITY +#endif + + +/*! ZDICT_trainFromBuffer(): + * Train a dictionary from an array of samples. + * Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4, + * f=20, and accel=1. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * Note: Dictionary training will fail if there are not enough samples to construct a + * dictionary, or if most of the samples are too small (< 8 bytes being the lower limit). + * If dictionary training fails, you should use zstd without a dictionary, as the dictionary + * would've been ineffective anyways. If you believe your samples would benefit from a dictionary + * please open an issue with details, and we can look into it. + * Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB. 
+ * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, + const size_t* samplesSizes, unsigned nbSamples); + +typedef struct { + int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */ + unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */ + unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value) */ +} ZDICT_params_t; + +/*! ZDICT_finalizeDictionary(): + * Given a custom content as a basis for dictionary, and a set of samples, + * finalize dictionary by adding headers and statistics according to the zstd + * dictionary format. + * + * Samples must be stored concatenated in a flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each + * sample in order. The samples are used to construct the statistics, so they + * should be representative of what you will compress with this dictionary. + * + * The compression level can be set in `parameters`. You should pass the + * compression level you expect to use in production. The statistics for each + * compression level differ, so tuning the dictionary for the compression level + * can help quite a bit. + * + * You can set an explicit dictionary ID in `parameters`, or allow us to pick + * a random dictionary ID for you, but we can't guarantee no collisions. + * + * The dstDictBuffer and the dictContent may overlap, and the content will be + * appended to the end of the header. 
If the header + the content doesn't fit in + * maxDictSize the beginning of the content is truncated to make room, since it + * is presumed that the most profitable content is at the end of the dictionary, + * since that is the cheapest to reference. + * + * `dictContentSize` must be >= ZDICT_CONTENTSIZE_MIN bytes. + * `maxDictSize` must be >= max(dictContentSize, ZSTD_DICTSIZE_MIN). + * + * @return: size of dictionary stored into `dstDictBuffer` (<= `maxDictSize`), + * or an error code, which can be tested by ZDICT_isError(). + * Note: ZDICT_finalizeDictionary() will push notifications into stderr if + * instructed to, using notificationLevel>0. + * NOTE: This function currently may fail in several edge cases including: + * * Not enough samples + * * Samples are uncompressible + * * Samples are all exactly the same + */ +ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dstDictBuffer, size_t maxDictSize, + const void* dictContent, size_t dictContentSize, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_params_t parameters); + + +/*====== Helper functions ======*/ +ZDICTLIB_API unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize); /**< extracts dictID; @return zero if error (not a valid dictionary) */ +ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize); /* returns dict header size; returns a ZSTD error code on failure */ +ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode); +ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode); + + + +#ifdef ZDICT_STATIC_LINKING_ONLY + +/* ==================================================================================== + * The definitions in this section are considered experimental. + * They should never be used with a dynamic library, as they may change in the future. + * They are provided for advanced usages. + * Use them only in association with static linking. 
+ * ==================================================================================== */ + +#define ZDICT_CONTENTSIZE_MIN 128 +#define ZDICT_DICTSIZE_MIN 256 + +/*! ZDICT_cover_params_t: + * k and d are the only required parameters. + * For others, value 0 means default. + */ +typedef struct { + unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ + unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (1.0), 1.0 when all samples are used for both training and testing */ + unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. 
*/ + ZDICT_params_t zParams; +} ZDICT_cover_params_t; + +typedef struct { + unsigned k; /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */ + unsigned d; /* dmer size : constraint: 0 < d <= k : Reasonable range [6, 16] */ + unsigned f; /* log of size of frequency array : constraint: 0 < f <= 31 : 1 means default(20)*/ + unsigned steps; /* Number of steps : Only used for optimization : 0 means default (40) : Higher means more parameters checked */ + unsigned nbThreads; /* Number of threads : constraint: 0 < nbThreads : 1 means single-threaded : Only used for optimization : Ignored if ZSTD_MULTITHREAD is not defined */ + double splitPoint; /* Percentage of samples used for training: Only used for optimization : the first nbSamples * splitPoint samples will be used to training, the last nbSamples * (1 - splitPoint) samples will be used for testing, 0 means default (0.75), 1.0 when all samples are used for both training and testing */ + unsigned accel; /* Acceleration level: constraint: 0 < accel <= 10, higher means faster and less accurate, 0 means default(1) */ + unsigned shrinkDict; /* Train dictionaries to shrink in size starting from the minimum size and selects the smallest dictionary that is shrinkDictMaxRegression% worse than the largest dictionary. 0 means no shrinking and 1 means shrinking */ + unsigned shrinkDictMaxRegression; /* Sets shrinkDictMaxRegression so that a smaller dictionary can be at worse shrinkDictMaxRegression% worse than the max dict size dictionary. */ + + ZDICT_params_t zParams; +} ZDICT_fastCover_params_t; + +/*! ZDICT_trainFromBuffer_cover(): + * Train a dictionary from an array of samples using the COVER algorithm. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. 
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * See ZDICT_trainFromBuffer() for details on failure modes. + * Note: ZDICT_trainFromBuffer_cover() requires about 9 bytes of memory for each input byte. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover( + void *dictBuffer, size_t dictBufferCapacity, + const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t parameters); + +/*! ZDICT_optimizeTrainFromBuffer_cover(): + * The same requirements as above hold for all the parameters except `parameters`. + * This function tries many parameter combinations and picks the best parameters. + * `*parameters` is filled with the best parameters found, + * dictionary constructed with those parameters is stored in `dictBuffer`. + * + * All of the parameters d, k, steps are optional. + * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. + * if steps is zero it defaults to its default value. + * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. + * + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * On success `*parameters` contains the parameters selected. + * See ZDICT_trainFromBuffer() for details on failure modes. 
+ * Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread. + */ +ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover( + void* dictBuffer, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples, + ZDICT_cover_params_t* parameters); + +/*! ZDICT_trainFromBuffer_fastCover(): + * Train a dictionary from an array of samples using a modified version of COVER algorithm. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * d and k are required. + * All other parameters are optional, will use default values if not provided + * The resulting dictionary will be saved into `dictBuffer`. + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * See ZDICT_trainFromBuffer() for details on failure modes. + * Note: ZDICT_trainFromBuffer_fastCover() requires 6 * 2^f bytes of memory. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer, + size_t dictBufferCapacity, const void *samplesBuffer, + const size_t *samplesSizes, unsigned nbSamples, + ZDICT_fastCover_params_t parameters); + +/*! ZDICT_optimizeTrainFromBuffer_fastCover(): + * The same requirements as above hold for all the parameters except `parameters`. 
+ * This function tries many parameter combinations (specifically, k and d combinations) + * and picks the best parameters. `*parameters` is filled with the best parameters found, + * dictionary constructed with those parameters is stored in `dictBuffer`. + * All of the parameters d, k, steps, f, and accel are optional. + * If d is non-zero then we don't check multiple values of d, otherwise we check d = {6, 8}. + * if steps is zero it defaults to its default value. + * If k is non-zero then we don't check multiple values of k, otherwise we check steps values in [50, 2000]. + * If f is zero, default value of 20 is used. + * If accel is zero, default value of 1 is used. + * + * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * On success `*parameters` contains the parameters selected. + * See ZDICT_trainFromBuffer() for details on failure modes. + * Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread. + */ +ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer, + size_t dictBufferCapacity, const void* samplesBuffer, + const size_t* samplesSizes, unsigned nbSamples, + ZDICT_fastCover_params_t* parameters); + +typedef struct { + unsigned selectivityLevel; /* 0 means default; larger => select more => larger dictionary */ + ZDICT_params_t zParams; +} ZDICT_legacy_params_t; + +/*! ZDICT_trainFromBuffer_legacy(): + * Train a dictionary from an array of samples. + * Samples must be stored concatenated in a single flat buffer `samplesBuffer`, + * supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order. + * The resulting dictionary will be saved into `dictBuffer`. + * `parameters` is optional and can be provided with values set to 0 to mean "default". 
+ * @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`) + * or an error code, which can be tested with ZDICT_isError(). + * See ZDICT_trainFromBuffer() for details on failure modes. + * Tips: In general, a reasonable dictionary has a size of ~ 100 KB. + * It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`. + * In general, it's recommended to provide a few thousands samples, though this can vary a lot. + * It's recommended that total size of all samples be about ~x100 times the target size of dictionary. + * Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0. + */ +ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy( + void *dictBuffer, size_t dictBufferCapacity, + const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples, + ZDICT_legacy_params_t parameters); + +/* Deprecation warnings */ +/* It is generally possible to disable deprecation warnings from compiler, + for example with -Wno-deprecated-declarations for gcc + or _CRT_SECURE_NO_WARNINGS in Visual. 
+ Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */ +#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS +# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */ +#else +# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ +# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API +# elif (ZDICT_GCC_VERSION >= 405) || defined(__clang__) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message))) +# elif (ZDICT_GCC_VERSION >= 301) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message)) +# else +# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler") +# define ZDICT_DEPRECATED(message) ZDICTLIB_API +# endif +#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */ + +ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead") +size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity, + const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples); + + +#endif /* ZDICT_STATIC_LINKING_ONLY */ + +#if defined (__cplusplus) +} +#endif + +#endif /* DICTBUILDER_H_001 */ diff --git a/src/borg/algorithms/zstd/lib/legacy/zstd_legacy.h b/src/borg/algorithms/zstd/lib/legacy/zstd_legacy.h new file mode 100644 index 0000000000..6bea6a519a --- /dev/null +++ b/src/borg/algorithms/zstd/lib/legacy/zstd_legacy.h @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). 
+ * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_LEGACY_H +#define ZSTD_LEGACY_H + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ************************************* +* Includes +***************************************/ +#include "../common/mem.h" /* MEM_STATIC */ +#include "../common/error_private.h" /* ERROR */ +#include "../common/zstd_internal.h" /* ZSTD_inBuffer, ZSTD_outBuffer, ZSTD_frameSizeInfo */ + +#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0) +# undef ZSTD_LEGACY_SUPPORT +# define ZSTD_LEGACY_SUPPORT 8 +#endif + +#if (ZSTD_LEGACY_SUPPORT <= 1) +# include "zstd_v01.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) +# include "zstd_v02.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) +# include "zstd_v03.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) +# include "zstd_v04.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) +# include "zstd_v05.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) +# include "zstd_v06.h" +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) +# include "zstd_v07.h" +#endif + +/** ZSTD_isLegacy() : + @return : > 0 if supported by legacy decoder. 0 otherwise. + return value is the version. 
+*/ +MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize) +{ + U32 magicNumberLE; + if (srcSize<4) return 0; + magicNumberLE = MEM_readLE32(src); + switch(magicNumberLE) + { +#if (ZSTD_LEGACY_SUPPORT <= 1) + case ZSTDv01_magicNumberLE:return 1; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) + case ZSTDv02_magicNumber : return 2; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) + case ZSTDv03_magicNumber : return 3; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) + case ZSTDv04_magicNumber : return 4; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case ZSTDv05_MAGICNUMBER : return 5; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case ZSTDv06_MAGICNUMBER : return 6; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case ZSTDv07_MAGICNUMBER : return 7; +#endif + default : return 0; + } +} + + +MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, size_t srcSize) +{ + U32 const version = ZSTD_isLegacy(src, srcSize); + if (version < 5) return 0; /* no decompressed size in frame header, or not a legacy format */ +#if (ZSTD_LEGACY_SUPPORT <= 5) + if (version==5) { + ZSTDv05_parameters fParams; + size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize); + if (frResult != 0) return 0; + return fParams.srcSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + if (version==6) { + ZSTDv06_frameParams fParams; + size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize); + if (frResult != 0) return 0; + return fParams.frameContentSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + if (version==7) { + ZSTDv07_frameParams fParams; + size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize); + if (frResult != 0) return 0; + return fParams.frameContentSize; + } +#endif + return 0; /* should not be possible */ +} + + +MEM_STATIC size_t ZSTD_decompressLegacy( + void* dst, size_t dstCapacity, + const void* src, size_t compressedSize, + const void* dict,size_t dictSize) +{ + U32 const version = ZSTD_isLegacy(src, compressedSize); + (void)dst; 
(void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ + switch(version) + { +#if (ZSTD_LEGACY_SUPPORT <= 1) + case 1 : + return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) + case 2 : + return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) + case 3 : + return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + { size_t result; + ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx(); + if (zd==NULL) return ERROR(memory_allocation); + result = ZSTDv05_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); + ZSTDv05_freeDCtx(zd); + return result; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + { size_t result; + ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx(); + if (zd==NULL) return ERROR(memory_allocation); + result = ZSTDv06_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); + ZSTDv06_freeDCtx(zd); + return result; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + { size_t result; + ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx(); + if (zd==NULL) return ERROR(memory_allocation); + result = ZSTDv07_decompress_usingDict(zd, dst, dstCapacity, src, compressedSize, dict, dictSize); + ZSTDv07_freeDCtx(zd); + return result; + } +#endif + default : + return ERROR(prefix_unknown); + } +} + +MEM_STATIC ZSTD_frameSizeInfo ZSTD_findFrameSizeInfoLegacy(const void *src, size_t srcSize) +{ + ZSTD_frameSizeInfo frameSizeInfo; + U32 const version = ZSTD_isLegacy(src, srcSize); + switch(version) + { +#if (ZSTD_LEGACY_SUPPORT <= 1) + case 1 : + ZSTDv01_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 2) + 
case 2 : + ZSTDv02_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 3) + case 3 : + ZSTDv03_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + ZSTDv04_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + ZSTDv05_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + ZSTDv06_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + ZSTDv07_findFrameSizeInfoLegacy(src, srcSize, + &frameSizeInfo.compressedSize, + &frameSizeInfo.decompressedBound); + break; +#endif + default : + frameSizeInfo.compressedSize = ERROR(prefix_unknown); + frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; + break; + } + if (!ZSTD_isError(frameSizeInfo.compressedSize) && frameSizeInfo.compressedSize > srcSize) { + frameSizeInfo.compressedSize = ERROR(srcSize_wrong); + frameSizeInfo.decompressedBound = ZSTD_CONTENTSIZE_ERROR; + } + return frameSizeInfo; +} + +MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src, size_t srcSize) +{ + ZSTD_frameSizeInfo frameSizeInfo = ZSTD_findFrameSizeInfoLegacy(src, srcSize); + return frameSizeInfo.compressedSize; +} + +MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version) +{ + switch(version) + { + default : + case 1 : + case 2 : + case 3 : + (void)legacyContext; + return ERROR(version_unsupported); +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : 
return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext); +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext); +#endif + } +} + + +MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, + const void* dict, size_t dictSize) +{ + DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); + if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); + switch(newVersion) + { + default : + case 1 : + case 2 : + case 3 : + (void)dict; (void)dictSize; + return 0; +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + { + ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv04_decompressInit(dctx); + ZBUFFv04_decompressWithDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + { + ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv05_decompressInitDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + { + ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv06_decompressInitDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + { + ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? 
ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext; + if (dctx==NULL) return ERROR(memory_allocation); + ZBUFFv07_decompressInitDictionary(dctx, dict, dictSize); + *legacyContext = dctx; + return 0; + } +#endif + } +} + + + +MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, + ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); + switch(version) + { + default : + case 1 : + case 2 : + case 3 : + (void)legacyContext; (void)output; (void)input; + return ERROR(version_unsupported); +#if (ZSTD_LEGACY_SUPPORT <= 4) + case 4 : + { + ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv04_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + input->pos += readSize; + return hintSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 5) + case 5 : + { + ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv05_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + input->pos += readSize; + return hintSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 6) + case 6 : + { + ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv06_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + 
input->pos += readSize; + return hintSize; + } +#endif +#if (ZSTD_LEGACY_SUPPORT <= 7) + case 7 : + { + ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext; + const void* src = (const char*)input->src + input->pos; + size_t readSize = input->size - input->pos; + void* dst = (char*)output->dst + output->pos; + size_t decodedSize = output->size - output->pos; + size_t const hintSize = ZBUFFv07_decompressContinue(dctx, dst, &decodedSize, src, &readSize); + output->pos += decodedSize; + input->pos += readSize; + return hintSize; + } +#endif + } +} + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_LEGACY_H */ diff --git a/src/borg/algorithms/zstd/lib/legacy/zstd_v01.c b/src/borg/algorithms/zstd/lib/legacy/zstd_v01.c new file mode 100644 index 0000000000..eb23628330 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/legacy/zstd_v01.c @@ -0,0 +1,2158 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +/****************************************** +* Includes +******************************************/ +#include /* size_t, ptrdiff_t */ +#include "zstd_v01.h" +#include "../common/error_private.h" + + +/****************************************** +* Static allocation +******************************************/ +/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) 
+* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#define FSE_MAX_MEMORY_USAGE 14 +#define FSE_DEFAULT_MEMORY_USAGE 13 + +/* FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. +* Required for proper stack allocation */ +#define FSE_MAX_SYMBOL_VALUE 255 + + +/**************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION + + +/**************************************************************** +* Byte symbol type +****************************************************************/ +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + + + +/**************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +#else +# define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/**************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf 
(debug) */ + + +#ifndef MEM_ACCESS_MODULE +#define MEM_ACCESS_MODULE +/**************************************************************** +* Basic Types +*****************************************************************/ +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# include +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef int16_t S16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +typedef int64_t S64; +#else +typedef unsigned char BYTE; +typedef unsigned short U16; +typedef signed short S16; +typedef unsigned int U32; +typedef signed int S32; +typedef unsigned long long U64; +typedef signed long long S64; +#endif + +#endif /* MEM_ACCESS_MODULE */ + +/**************************************************************** +* Memory I/O +*****************************************************************/ +/* FSE_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets generating assembly depending on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef FSE_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define FSE_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define FSE_FORCE_MEMORY_ACCESS 1 +# endif +#endif + + +static unsigned FSE_32bits(void) +{ + return sizeof(void*)==4; +} + +static unsigned FSE_isLittleEndian(void) +{ + const union { U32 i; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==2) + +static U16 FSE_read16(const void* memPtr) { return *(const U16*) memPtr; } +static U32 FSE_read32(const void* memPtr) { return *(const U32*) memPtr; } +static U64 FSE_read64(const void* memPtr) { return *(const U64*) memPtr; } + +#elif defined(FSE_FORCE_MEMORY_ACCESS) && (FSE_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; + +static U16 FSE_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +static U32 FSE_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +static U64 FSE_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +#else + +static U16 FSE_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U32 FSE_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +static U64 
FSE_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +#endif /* FSE_FORCE_MEMORY_ACCESS */ + +static U16 FSE_readLE16(const void* memPtr) +{ + if (FSE_isLittleEndian()) + return FSE_read16(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +static U32 FSE_readLE32(const void* memPtr) +{ + if (FSE_isLittleEndian()) + return FSE_read32(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); + } +} + + +static U64 FSE_readLE64(const void* memPtr) +{ + if (FSE_isLittleEndian()) + return FSE_read64(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); + } +} + +static size_t FSE_readLEST(const void* memPtr) +{ + if (FSE_32bits()) + return (size_t)FSE_readLE32(memPtr); + else + return (size_t)FSE_readLE64(memPtr); +} + + + +/**************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + + +/**************************************************************** +* Error Management +****************************************************************/ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/**************************************************************** +* Complex types +****************************************************************/ +typedef struct +{ + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; /* total 8 bytes */ + +typedef U32 
DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + +/**************************************************************** +* Internal functions +****************************************************************/ +FORCE_INLINE unsigned FSE_highbit32 (U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (GCC_VERSION >= 304) /* GCC Intrinsic */ + return __builtin_clz (val) ^ 31; +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + unsigned r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; + return r; +# endif +} + + +/**************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + + +static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } + +#define FSE_DECODE_TYPE FSE_decode_t + + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +static size_t FSE_buildDTable +(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)(ptr) + 
1; /* because dt is unsigned, 32-bits aligned on 32-bits */ + const U32 tableSize = 1 << tableLog; + const U32 tableMask = tableSize-1; + const U32 step = FSE_tableStep(tableSize); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + U32 position = 0; + U32 highThreshold = tableSize-1; + const S16 largeLimit= (S16)(1 << (tableLog-1)); + U32 noLarge = 1; + U32 s; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return (size_t)-FSE_ERROR_maxSymbolValue_tooLarge; + if (tableLog > FSE_MAX_TABLELOG) return (size_t)-FSE_ERROR_tableLog_tooLarge; + + /* Init, lay down lowprob symbols */ + DTableH[0].tableLog = (U16)tableLog; + for (s=0; s<=maxSymbolValue; s++) + { + if (normalizedCounter[s]==-1) + { + tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; + symbolNext[s] = 1; + } + else + { + if (normalizedCounter[s] >= largeLimit) noLarge=0; + symbolNext[s] = normalizedCounter[s]; + } + } + + /* Spread symbols */ + for (s=0; s<=maxSymbolValue; s++) + { + int i; + for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } + } + + if (position!=0) return (size_t)-FSE_ERROR_GENERIC; /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + + /* Build Decoding table */ + { + U32 i; + for (i=0; ifastMode = (U16)noLarge; + return 0; +} + + +/****************************************** +* FSE byte symbol +******************************************/ +#ifndef FSE_COMMONDEFS_ONLY + +static unsigned FSE_isError(size_t code) { return (code > (size_t)(-FSE_ERROR_maxCode)); } + +static short FSE_abs(short a) +{ + return a<0? 
-a : a; +} + + +/**************************************************************** +* Header bitstream management +****************************************************************/ +static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + const BYTE* const istart = (const BYTE*) headerBuffer; + const BYTE* const iend = istart + hbSize; + const BYTE* ip = istart; + int nbBits; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + if (hbSize < 4) return (size_t)-FSE_ERROR_srcSize_wrong; + bitStream = FSE_readLE32(ip); + nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ + if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return (size_t)-FSE_ERROR_tableLog_tooLarge; + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<1) && (charnum<=*maxSVPtr)) + { + if (previous0) + { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) + { + n0+=24; + if (ip < iend-5) + { + ip+=2; + bitStream = FSE_readLE32(ip) >> bitCount; + } + else + { + bitStream >>= 16; + bitCount+=16; + } + } + while ((bitStream & 3) == 3) + { + n0+=3; + bitStream>>=2; + bitCount+=2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return (size_t)-FSE_ERROR_maxSymbolValue_tooSmall; + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + bitStream = FSE_readLE32(ip) >> bitCount; + } + else + bitStream >>= 2; + } + { + const short max = (short)((2*threshold-1)-remaining); + short count; + + if ((bitStream & (threshold-1)) < (U32)max) + { + count = (short)(bitStream & (threshold-1)); + bitCount += nbBits-1; + } + else + { + count = (short)(bitStream & (2*threshold-1)); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= FSE_abs(count); + 
normalizedCounter[charnum++] = count; + previous0 = !count; + while (remaining < threshold) + { + nbBits--; + threshold >>= 1; + } + + { + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = FSE_readLE32(ip) >> (bitCount & 31); + } + } + } + if (remaining != 1) return (size_t)-FSE_ERROR_GENERIC; + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + if ((size_t)(ip-istart) > hbSize) return (size_t)-FSE_ERROR_srcSize_wrong; + return ip-istart; +} + + +/********************************************************* +* Decompression (Byte symbols) +*********************************************************/ +static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + + DTableH->tableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSymbolValue = tableMask; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return (size_t)-FSE_ERROR_GENERIC; /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s<=maxSymbolValue; s++) + { + dinfo[s].newState = 0; + dinfo[s].symbol = (BYTE)s; + dinfo[s].nbBits = (BYTE)nbBits; + } + + return 0; +} + + +/* FSE_initDStream + * Initialize a FSE_DStream_t. + * srcBuffer must point at the beginning of an FSE block. 
+ * The function result is the size of the FSE_block (== srcSize). + * If srcSize is too small, the function will return an errorCode; + */ +static size_t FSE_initDStream(FSE_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) return (size_t)-FSE_ERROR_srcSize_wrong; + + if (srcSize >= sizeof(size_t)) + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); + bitD->bitContainer = FSE_readLEST(bitD->ptr); + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ + bitD->bitsConsumed = 8 - FSE_highbit32(contain32); + } + else + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); + /* fallthrough */ + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); + /* fallthrough */ + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); + /* fallthrough */ + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; + /* fallthrough */ + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; + /* fallthrough */ + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; + /* fallthrough */ + default:; + } + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return (size_t)-FSE_ERROR_GENERIC; /* stop bit not present */ + bitD->bitsConsumed = 8 - FSE_highbit32(contain32); + bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; + } + + return srcSize; +} + + +/*!FSE_lookBits + * Provides next n bits from the bitContainer. 
+ * bitContainer is not modified (bits are still present for next read/look) + * On 32-bits, maxNbBits==25 + * On 64-bits, maxNbBits==57 + * return : value extracted. + */ +static size_t FSE_lookBits(FSE_DStream_t* bitD, U32 nbBits) +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); +} + +static size_t FSE_lookBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! */ +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); +} + +static void FSE_skipBits(FSE_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + + +/*!FSE_readBits + * Read next n bits from the bitContainer. + * On 32-bits, don't read more than maxNbBits==25 + * On 64-bits, don't read more than maxNbBits==57 + * Use the fast variant *only* if n >= 1. + * return : value extracted. + */ +static size_t FSE_readBits(FSE_DStream_t* bitD, U32 nbBits) +{ + size_t value = FSE_lookBits(bitD, nbBits); + FSE_skipBits(bitD, nbBits); + return value; +} + +static size_t FSE_readBitsFast(FSE_DStream_t* bitD, U32 nbBits) /* only if nbBits >= 1 !! 
*/ +{ + size_t value = FSE_lookBitsFast(bitD, nbBits); + FSE_skipBits(bitD, nbBits); + return value; +} + +static unsigned FSE_reloadDStream(FSE_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ + return FSE_DStream_tooFar; + + if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) + { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = FSE_readLEST(bitD->ptr); + return FSE_DStream_unfinished; + } + if (bitD->ptr == bitD->start) + { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return FSE_DStream_endOfBuffer; + return FSE_DStream_completed; + } + { + U32 nbBytes = bitD->bitsConsumed >> 3; + U32 result = FSE_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) + { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = FSE_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = FSE_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ + return result; + } +} + + +static void FSE_initDState(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD, const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; + DStatePtr->state = FSE_readBits(bitD, DTableH->tableLog); + FSE_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +static BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = FSE_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +static BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, FSE_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = 
FSE_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +/* FSE_endOfDStream + Tells if bitD has reached end of bitStream or not */ + +static unsigned FSE_endOfDStream(const FSE_DStream_t* bitD) +{ + return ((bitD->ptr == bitD->start) && (bitD->bitsConsumed == sizeof(bitD->bitContainer)*8)); +} + +static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + +FORCE_INLINE size_t FSE_decompress_usingDTable_generic( + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt, const unsigned fast) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = omax-3; + + FSE_DStream_t bitD; + FSE_DState_t state1; + FSE_DState_t state2; + size_t errorCode; + + /* Init */ + errorCode = FSE_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ + if (FSE_isError(errorCode)) return errorCode; + + FSE_initDState(&state1, &bitD, dt); + FSE_initDState(&state2, &bitD, dt); + +#define FSE_GETSYMBOL(statePtr) fast ? 
FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) + + /* 4 symbols per loop */ + for ( ; (FSE_reloadDStream(&bitD)==FSE_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ + FSE_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (FSE_reloadDStream(&bitD) > FSE_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + FSE_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : FSE_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly FSE_DStream_completed */ + while (1) + { + if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) + break; + + *op++ = FSE_GETSYMBOL(&state1); + + if ( (FSE_reloadDStream(&bitD)>FSE_DStream_completed) || (op==omax) || (FSE_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) + break; + + *op++ = FSE_GETSYMBOL(&state2); + } + + /* end ? 
*/ + if (FSE_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) + return op-ostart; + + if (op==omax) return (size_t)-FSE_ERROR_dstSize_tooSmall; /* dst buffer is full, but cSrc unfinished */ + + return (size_t)-FSE_ERROR_corruptionDetected; +} + + +static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + FSE_DTableHeader DTableH; + memcpy(&DTableH, dt, sizeof(DTableH)); /* memcpy() into local variable, to avoid strict aliasing warning */ + + /* select fast mode (static) */ + if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + size_t errorCode; + + if (cSrcSize<2) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ + + /* normal FSE decoding mode */ + errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(errorCode)) return errorCode; + if (errorCode >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; /* too small input size */ + ip += errorCode; + cSrcSize -= errorCode; + + errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); + if (FSE_isError(errorCode)) return errorCode; + + /* always return, even if it is an error code */ + return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); +} + + + +/* ******************************************************* +* Huff0 : Huffman block compression 
+*********************************************************/ +#define HUF_MAX_SYMBOL_VALUE 255 +#define HUF_DEFAULT_TABLELOG 12 /* used by default, when not specified */ +#define HUF_MAX_TABLELOG 12 /* max possible tableLog; for allocation purpose; can be modified */ +#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) +# error "HUF_MAX_TABLELOG is too large !" +#endif + +typedef struct HUF_CElt_s { + U16 val; + BYTE nbBits; +} HUF_CElt ; + +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + + +/* ******************************************************* +* Huff0 : Huffman block decompression +*********************************************************/ +typedef struct { + BYTE byte; + BYTE nbBits; +} HUF_DElt; + +static size_t HUF_readDTable (U16* DTable, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ + U32 weightTotal; + U32 maxBits; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + U32 n; + U32 nextRankStart; + void* ptr = DTable+1; + HUF_DElt* const dt = (HUF_DElt*)ptr; + + if (!srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + iSize = ip[0]; + + FSE_STATIC_ASSERT(sizeof(HUF_DElt) == sizeof(U16)); /* if compilation fails here, assertion is false */ + //memset(huffWeight, 0, sizeof(huffWeight)); /* should not be necessary, but some analyzer complain ... 
*/ + if (iSize >= 128) /* special header */ + { + if (iSize >= (242)) /* RLE */ + { + static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; + oSize = l[iSize-242]; + memset(huffWeight, 1, sizeof(huffWeight)); + iSize = 0; + } + else /* Incompressible */ + { + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + ip += 1; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; + } + } + } + else /* header compressed with FSE (normal case) */ + { + if (iSize+1 > srcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + oSize = FSE_decompress(huffWeight, HUF_MAX_SYMBOL_VALUE, ip+1, iSize); /* max 255 values decoded, last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankVal, 0, sizeof(rankVal)); + weightTotal = 0; + for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return (size_t)-FSE_ERROR_corruptionDetected; + rankVal[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } + if (weightTotal == 0) return (size_t)-FSE_ERROR_corruptionDetected; + + /* get last non-null symbol weight (implied, total must be 2^n) */ + maxBits = FSE_highbit32(weightTotal) + 1; + if (maxBits > DTable[0]) return (size_t)-FSE_ERROR_tableLog_tooLarge; /* DTable is too small */ + DTable[0] = (U16)maxBits; + { + U32 total = 1 << maxBits; + U32 rest = total - weightTotal; + U32 verif = 1 << FSE_highbit32(rest); + U32 lastWeight = FSE_highbit32(rest) + 1; + if (verif != rest) return (size_t)-FSE_ERROR_corruptionDetected; /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankVal[lastWeight]++; + } + + /* check tree construction validity */ + if ((rankVal[1] < 2) || (rankVal[1] & 1)) return (size_t)-FSE_ERROR_corruptionDetected; /* by construction : at least 2 elts of rank 1, must be even */ + + /* Prepare ranks */ + nextRankStart = 0; + for (n=1; n<=maxBits; n++) + { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); 
+ rankVal[n] = current; + } + + /* fill DTable */ + for (n=0; n<=oSize; n++) + { + const U32 w = huffWeight[n]; + const U32 length = (1 << w) >> 1; + U32 i; + HUF_DElt D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(maxBits + 1 - w); + for (i = rankVal[w]; i < rankVal[w] + length; i++) + dt[i] = D; + rankVal[w] += length; + } + + return iSize+1; +} + + +static BYTE HUF_decodeSymbol(FSE_DStream_t* Dstream, const HUF_DElt* dt, const U32 dtLog) +{ + const size_t val = FSE_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + const BYTE c = dt[val].byte; + FSE_skipBits(Dstream, dt[val].nbBits); + return c; +} + +static size_t HUF_decompress_usingDTable( /* -3% slower when non static */ + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const U16* DTable) +{ + if (cSrcSize < 6) return (size_t)-FSE_ERROR_srcSize_wrong; + { + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = maxDstSize < 15 ? op : omax-15; + + const void* ptr = DTable; + const HUF_DElt* const dt = (const HUF_DElt*)(ptr)+1; + const U32 dtLog = DTable[0]; + size_t errorCode; + U32 reloadStatus; + + /* Init */ + + const U16* jumpTable = (const U16*)cSrc; + const size_t length1 = FSE_readLE16(jumpTable); + const size_t length2 = FSE_readLE16(jumpTable+1); + const size_t length3 = FSE_readLE16(jumpTable+2); + const size_t length4 = cSrcSize - 6 - length1 - length2 - length3; /* check coherency !! 
*/ + const char* const start1 = (const char*)(cSrc) + 6; + const char* const start2 = start1 + length1; + const char* const start3 = start2 + length2; + const char* const start4 = start3 + length3; + FSE_DStream_t bitD1, bitD2, bitD3, bitD4; + + if (length1+length2+length3+6 >= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + + errorCode = FSE_initDStream(&bitD1, start1, length1); + if (FSE_isError(errorCode)) return errorCode; + errorCode = FSE_initDStream(&bitD2, start2, length2); + if (FSE_isError(errorCode)) return errorCode; + errorCode = FSE_initDStream(&bitD3, start3, length3); + if (FSE_isError(errorCode)) return errorCode; + errorCode = FSE_initDStream(&bitD4, start4, length4); + if (FSE_isError(errorCode)) return errorCode; + + reloadStatus=FSE_reloadDStream(&bitD2); + + /* 16 symbols per loop */ + for ( ; (reloadStatus12)) FSE_reloadDStream(&Dstream) + + #define HUF_DECODE_SYMBOL_2(n, Dstream) \ + op[n] = HUF_decodeSymbol(&Dstream, dt, dtLog); \ + if (FSE_32bits()) FSE_reloadDStream(&Dstream) + + HUF_DECODE_SYMBOL_1( 0, bitD1); + HUF_DECODE_SYMBOL_1( 1, bitD2); + HUF_DECODE_SYMBOL_1( 2, bitD3); + HUF_DECODE_SYMBOL_1( 3, bitD4); + HUF_DECODE_SYMBOL_2( 4, bitD1); + HUF_DECODE_SYMBOL_2( 5, bitD2); + HUF_DECODE_SYMBOL_2( 6, bitD3); + HUF_DECODE_SYMBOL_2( 7, bitD4); + HUF_DECODE_SYMBOL_1( 8, bitD1); + HUF_DECODE_SYMBOL_1( 9, bitD2); + HUF_DECODE_SYMBOL_1(10, bitD3); + HUF_DECODE_SYMBOL_1(11, bitD4); + HUF_DECODE_SYMBOL_0(12, bitD1); + HUF_DECODE_SYMBOL_0(13, bitD2); + HUF_DECODE_SYMBOL_0(14, bitD3); + HUF_DECODE_SYMBOL_0(15, bitD4); + } + + if (reloadStatus!=FSE_DStream_completed) /* not complete : some bitStream might be FSE_DStream_unfinished */ + return (size_t)-FSE_ERROR_corruptionDetected; + + /* tail */ + { + /* bitTail = bitD1; */ /* *much* slower : -20% !??! 
*/ + FSE_DStream_t bitTail; + bitTail.ptr = bitD1.ptr; + bitTail.bitsConsumed = bitD1.bitsConsumed; + bitTail.bitContainer = bitD1.bitContainer; /* required in case of FSE_DStream_endOfBuffer */ + bitTail.start = start1; + for ( ; (FSE_reloadDStream(&bitTail) < FSE_DStream_completed) && (op= cSrcSize) return (size_t)-FSE_ERROR_srcSize_wrong; + ip += errorCode; + cSrcSize -= errorCode; + + return HUF_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, DTable); +} + + +#endif /* FSE_COMMONDEFS_ONLY */ + +/* + zstd - standard compression library + Copyright (C) 2014-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - zstd source repository : https://github.com/Cyan4973/zstd + - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c +*/ + +/**************************************************************** +* Tuning parameters +*****************************************************************/ +/* MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect */ +#define ZSTD_MEMORY_USAGE 17 + + +/************************************** + CPU Feature Detection +**************************************/ +/* + * Automated efficient unaligned memory access detection + * Based on known hardware architectures + * This list will be updated thanks to feedbacks + */ +#if defined(CPU_HAS_EFFICIENT_UNALIGNED_MEMORY_ACCESS) \ + || defined(__ARM_FEATURE_UNALIGNED) \ + || defined(__i386__) || defined(__x86_64__) \ + || defined(_M_IX86) || defined(_M_X64) \ + || defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_8__) \ + || (defined(_M_ARM) && (_M_ARM >= 7)) +# define ZSTD_UNALIGNED_ACCESS 1 +#else +# define ZSTD_UNALIGNED_ACCESS 0 +#endif + + +/******************************************************** +* Includes +*********************************************************/ +#include /* calloc */ +#include /* memcpy, memmove */ +#include /* debug : printf */ + + +/******************************************************** +* Compiler specifics +*********************************************************/ +#ifdef __AVX2__ +# include /* AVX2 intrinsics */ +#endif + +#ifdef _MSC_VER /* Visual Studio */ +# include /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4324) /* disable: C4324: padded structure */ +#endif + + +#ifndef MEM_ACCESS_MODULE +#define MEM_ACCESS_MODULE 
+/******************************************************** +* Basic Types +*********************************************************/ +#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# include +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef int16_t S16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +#else +typedef unsigned char BYTE; +typedef unsigned short U16; +typedef signed short S16; +typedef unsigned int U32; +typedef signed int S32; +typedef unsigned long long U64; +#endif + +#endif /* MEM_ACCESS_MODULE */ + + +/******************************************************** +* Constants +*********************************************************/ +static const U32 ZSTD_magicNumber = 0xFD2FB51E; /* 3rd version : seqNb header */ + +#define HASH_LOG (ZSTD_MEMORY_USAGE - 2) +#define HASH_TABLESIZE (1 << HASH_LOG) +#define HASH_MASK (HASH_TABLESIZE - 1) + +#define KNUTH 2654435761 + +#define BIT7 128 +#define BIT6 64 +#define BIT5 32 +#define BIT4 16 + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define BLOCKSIZE (128 KB) /* define, for static allocation */ + +#define WORKPLACESIZE (BLOCKSIZE*3) +#define MINMATCH 4 +#define MLbits 7 +#define LLbits 6 +#define Offbits 5 +#define MaxML ((1<>3]; +#else + U32 hashTable[HASH_TABLESIZE]; +#endif + BYTE buffer[WORKPLACESIZE]; +} cctxi_t; + + + + +/************************************** +* Error Management +**************************************/ +/* published entry point */ +unsigned ZSTDv01_isError(size_t code) { return ERR_isError(code); } + + +/************************************** +* Tool functions +**************************************/ +#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ +#define ZSTD_VERSION_MINOR 1 /* for new (non-breaking) interface capabilities */ +#define ZSTD_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */ +#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + 
ZSTD_VERSION_RELEASE) + +/************************************************************** +* Decompression code +**************************************************************/ + +static size_t ZSTDv01_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) +{ + const BYTE* const in = (const BYTE* const)src; + BYTE headerFlags; + U32 cSize; + + if (srcSize < 3) return ERROR(srcSize_wrong); + + headerFlags = *in; + cSize = in[2] + (in[1]<<8) + ((in[0] & 7)<<16); + + bpPtr->blockType = (blockType_t)(headerFlags >> 6); + bpPtr->origSize = (bpPtr->blockType == bt_rle) ? cSize : 0; + + if (bpPtr->blockType == bt_end) return 0; + if (bpPtr->blockType == bt_rle) return 1; + return cSize; +} + + +static size_t ZSTD_copyUncompressedBlock(void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + if (srcSize > maxDstSize) return ERROR(dstSize_tooSmall); + if (srcSize > 0) { + memcpy(dst, src, srcSize); + } + return srcSize; +} + + +static size_t ZSTD_decompressLiterals(void* ctx, + void* dst, size_t maxDstSize, + const void* src, size_t srcSize) +{ + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + maxDstSize; + const BYTE* ip = (const BYTE*)src; + size_t errorCode; + size_t litSize; + + /* check : minimum 2, for litSize, +1, for content */ + if (srcSize <= 3) return ERROR(corruption_detected); + + litSize = ip[1] + (ip[0]<<8); + litSize += ((ip[-3] >> 3) & 7) << 16; /* mmmmh.... 
*/ + op = oend - litSize; + + (void)ctx; + if (litSize > maxDstSize) return ERROR(dstSize_tooSmall); + errorCode = HUF_decompress(op, litSize, ip+2, srcSize-2); + if (FSE_isError(errorCode)) return ERROR(GENERIC); + return litSize; +} + + +static size_t ZSTDv01_decodeLiteralsBlock(void* ctx, + void* dst, size_t maxDstSize, + const BYTE** litStart, size_t* litSize, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* ip = istart; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + blockProperties_t litbp; + + size_t litcSize = ZSTDv01_getcBlockSize(src, srcSize, &litbp); + if (ZSTDv01_isError(litcSize)) return litcSize; + if (litcSize > srcSize - ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + ip += ZSTD_blockHeaderSize; + + switch(litbp.blockType) + { + case bt_raw: + *litStart = ip; + ip += litcSize; + *litSize = litcSize; + break; + case bt_rle: + { + size_t rleSize = litbp.origSize; + if (rleSize>maxDstSize) return ERROR(dstSize_tooSmall); + if (!srcSize) return ERROR(srcSize_wrong); + if (rleSize > 0) { + memset(oend - rleSize, *ip, rleSize); + } + *litStart = oend - rleSize; + *litSize = rleSize; + ip++; + break; + } + case bt_compressed: + { + size_t decodedLitSize = ZSTD_decompressLiterals(ctx, dst, maxDstSize, ip, litcSize); + if (ZSTDv01_isError(decodedLitSize)) return decodedLitSize; + *litStart = oend - decodedLitSize; + *litSize = decodedLitSize; + ip += litcSize; + break; + } + case bt_end: + default: + return ERROR(GENERIC); + } + + return ip-istart; +} + + +static size_t ZSTDv01_decodeSeqHeaders(int* nbSeq, const BYTE** dumpsPtr, size_t* dumpsLengthPtr, + FSE_DTable* DTableLL, FSE_DTable* DTableML, FSE_DTable* DTableOffb, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* ip = istart; + const BYTE* const iend = istart + srcSize; + U32 LLtype, Offtype, MLtype; + U32 LLlog, Offlog, MLlog; + size_t dumpsLength; 
+ + /* check */ + if (srcSize < 5) return ERROR(srcSize_wrong); + + /* SeqHead */ + *nbSeq = ZSTD_readLE16(ip); ip+=2; + LLtype = *ip >> 6; + Offtype = (*ip >> 4) & 3; + MLtype = (*ip >> 2) & 3; + if (*ip & 2) + { + dumpsLength = ip[2]; + dumpsLength += ip[1] << 8; + ip += 3; + } + else + { + dumpsLength = ip[1]; + dumpsLength += (ip[0] & 1) << 8; + ip += 2; + } + *dumpsPtr = ip; + ip += dumpsLength; + *dumpsLengthPtr = dumpsLength; + + /* check */ + if (ip > iend-3) return ERROR(srcSize_wrong); /* min : all 3 are "raw", hence no header, but at least xxLog bits per type */ + + /* sequences */ + { + S16 norm[MaxML+1]; /* assumption : MaxML >= MaxLL and MaxOff */ + size_t headerSize; + + /* Build DTables */ + switch(LLtype) + { + case bt_rle : + LLlog = 0; + FSE_buildDTable_rle(DTableLL, *ip++); break; + case bt_raw : + LLlog = LLbits; + FSE_buildDTable_raw(DTableLL, LLbits); break; + default : + { U32 max = MaxLL; + headerSize = FSE_readNCount(norm, &max, &LLlog, ip, iend-ip); + if (FSE_isError(headerSize)) return ERROR(GENERIC); + if (LLlog > LLFSELog) return ERROR(corruption_detected); + ip += headerSize; + FSE_buildDTable(DTableLL, norm, max, LLlog); + } } + + switch(Offtype) + { + case bt_rle : + Offlog = 0; + if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ + FSE_buildDTable_rle(DTableOffb, *ip++); break; + case bt_raw : + Offlog = Offbits; + FSE_buildDTable_raw(DTableOffb, Offbits); break; + default : + { U32 max = MaxOff; + headerSize = FSE_readNCount(norm, &max, &Offlog, ip, iend-ip); + if (FSE_isError(headerSize)) return ERROR(GENERIC); + if (Offlog > OffFSELog) return ERROR(corruption_detected); + ip += headerSize; + FSE_buildDTable(DTableOffb, norm, max, Offlog); + } } + + switch(MLtype) + { + case bt_rle : + MLlog = 0; + if (ip > iend-2) return ERROR(srcSize_wrong); /* min : "raw", hence no header, but at least xxLog bits */ + FSE_buildDTable_rle(DTableML, *ip++); break; + case bt_raw : + MLlog = 
MLbits; + FSE_buildDTable_raw(DTableML, MLbits); break; + default : + { U32 max = MaxML; + headerSize = FSE_readNCount(norm, &max, &MLlog, ip, iend-ip); + if (FSE_isError(headerSize)) return ERROR(GENERIC); + if (MLlog > MLFSELog) return ERROR(corruption_detected); + ip += headerSize; + FSE_buildDTable(DTableML, norm, max, MLlog); + } } } + + return ip-istart; +} + + +typedef struct { + size_t litLength; + size_t offset; + size_t matchLength; +} seq_t; + +typedef struct { + FSE_DStream_t DStream; + FSE_DState_t stateLL; + FSE_DState_t stateOffb; + FSE_DState_t stateML; + size_t prevOffset; + const BYTE* dumps; + const BYTE* dumpsEnd; +} seqState_t; + + +static void ZSTD_decodeSequence(seq_t* seq, seqState_t* seqState) +{ + size_t litLength; + size_t prevOffset; + size_t offset; + size_t matchLength; + const BYTE* dumps = seqState->dumps; + const BYTE* const de = seqState->dumpsEnd; + + /* Literal length */ + litLength = FSE_decodeSymbol(&(seqState->stateLL), &(seqState->DStream)); + prevOffset = litLength ? 
seq->offset : seqState->prevOffset; + seqState->prevOffset = seq->offset; + if (litLength == MaxLL) + { + const U32 add = dumpsstateOffb), &(seqState->DStream)); + if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); + nbBits = offsetCode - 1; + if (offsetCode==0) nbBits = 0; /* cmove */ + offset = ((size_t)1 << (nbBits & ((sizeof(offset)*8)-1))) + FSE_readBits(&(seqState->DStream), nbBits); + if (ZSTD_32bits()) FSE_reloadDStream(&(seqState->DStream)); + if (offsetCode==0) offset = prevOffset; + } + + /* MatchLength */ + matchLength = FSE_decodeSymbol(&(seqState->stateML), &(seqState->DStream)); + if (matchLength == MaxML) + { + const U32 add = dumpslitLength = litLength; + seq->offset = offset; + seq->matchLength = matchLength; + seqState->dumps = dumps; +} + + +static size_t ZSTD_execSequence(BYTE* op, + seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + BYTE* const base, BYTE* const oend) +{ + static const int dec32table[] = {0, 1, 2, 1, 4, 4, 4, 4}; /* added */ + static const int dec64table[] = {8, 8, 8, 7, 8, 9,10,11}; /* subtracted */ + const BYTE* const ostart = op; + const size_t litLength = sequence.litLength; + BYTE* const endMatch = op + litLength + sequence.matchLength; /* risk : address space overflow (32-bits) */ + const BYTE* const litEnd = *litPtr + litLength; + + /* check */ + if (endMatch > oend) return ERROR(dstSize_tooSmall); /* overwrite beyond dst buffer */ + if (litEnd > litLimit) return ERROR(corruption_detected); + if (sequence.matchLength > (size_t)(*litPtr-op)) return ERROR(dstSize_tooSmall); /* overwrite literal segment */ + + /* copy Literals */ + if (((size_t)(*litPtr - op) < 8) || ((size_t)(oend-litEnd) < 8) || (op+litLength > oend-8)) + memmove(op, *litPtr, litLength); /* overwrite risk */ + else + ZSTD_wildcopy(op, *litPtr, litLength); + op += litLength; + *litPtr = litEnd; /* update for next sequence */ + + /* check : last match must be at a minimum distance of 8 from end of dest buffer */ + if (oend-op < 
8) return ERROR(dstSize_tooSmall); + + /* copy Match */ + { + const U32 overlapRisk = (((size_t)(litEnd - endMatch)) < 12); + const BYTE* match = op - sequence.offset; /* possible underflow at op - offset ? */ + size_t qutt = 12; + U64 saved[2]; + + /* check */ + if (match < base) return ERROR(corruption_detected); + if (sequence.offset > (size_t)base) return ERROR(corruption_detected); + + /* save beginning of literal sequence, in case of write overlap */ + if (overlapRisk) + { + if ((endMatch + qutt) > oend) qutt = oend-endMatch; + memcpy(saved, endMatch, qutt); + } + + if (sequence.offset < 8) + { + const int dec64 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= dec64; + } else { ZSTD_copy8(op, match); } + op += 8; match += 8; + + if (endMatch > oend-(16-MINMATCH)) + { + if (op < oend-8) + { + ZSTD_wildcopy(op, match, (oend-8) - op); + match += (oend-8) - op; + op = oend-8; + } + while (opLLTable; + U32* DTableML = dctx->MLTable; + U32* DTableOffb = dctx->OffTable; + BYTE* const base = (BYTE*) (dctx->base); + + /* Build Decoding Tables */ + errorCode = ZSTDv01_decodeSeqHeaders(&nbSeq, &dumps, &dumpsLength, + DTableLL, DTableML, DTableOffb, + ip, iend-ip); + if (ZSTDv01_isError(errorCode)) return errorCode; + ip += errorCode; + + /* Regen sequences */ + { + seq_t sequence; + seqState_t seqState; + + memset(&sequence, 0, sizeof(sequence)); + seqState.dumps = dumps; + seqState.dumpsEnd = dumps + dumpsLength; + seqState.prevOffset = 1; + errorCode = FSE_initDStream(&(seqState.DStream), ip, iend-ip); + if (FSE_isError(errorCode)) return ERROR(corruption_detected); + FSE_initDState(&(seqState.stateLL), &(seqState.DStream), DTableLL); + FSE_initDState(&(seqState.stateOffb), &(seqState.DStream), DTableOffb); + FSE_initDState(&(seqState.stateML), &(seqState.DStream), DTableML); + + for ( ; (FSE_reloadDStream(&(seqState.DStream)) <= 
FSE_DStream_completed) && (nbSeq>0) ; ) + { + size_t oneSeqSize; + nbSeq--; + ZSTD_decodeSequence(&sequence, &seqState); + oneSeqSize = ZSTD_execSequence(op, sequence, &litPtr, litEnd, base, oend); + if (ZSTDv01_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + + /* check if reached exact end */ + if ( !FSE_endOfDStream(&(seqState.DStream)) ) return ERROR(corruption_detected); /* requested too much : data is corrupted */ + if (nbSeq<0) return ERROR(corruption_detected); /* requested too many sequences : data is corrupted */ + + /* last literal segment */ + { + size_t lastLLSize = litEnd - litPtr; + if (op+lastLLSize > oend) return ERROR(dstSize_tooSmall); + if (lastLLSize > 0) { + if (op != litPtr) memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + } + + return op-ostart; +} + + +static size_t ZSTD_decompressBlock( + void* ctx, + void* dst, size_t maxDstSize, + const void* src, size_t srcSize) +{ + /* blockType == blockCompressed, srcSize is trusted */ + const BYTE* ip = (const BYTE*)src; + const BYTE* litPtr = NULL; + size_t litSize = 0; + size_t errorCode; + + /* Decode literals sub-block */ + errorCode = ZSTDv01_decodeLiteralsBlock(ctx, dst, maxDstSize, &litPtr, &litSize, src, srcSize); + if (ZSTDv01_isError(errorCode)) return errorCode; + ip += errorCode; + srcSize -= errorCode; + + return ZSTD_decompressSequences(ctx, dst, maxDstSize, ip, srcSize, litPtr, litSize); +} + + +size_t ZSTDv01_decompressDCtx(void* ctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* iend = ip + srcSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* op = ostart; + BYTE* const oend = ostart + maxDstSize; + size_t remainingSize = srcSize; + U32 magicNumber; + size_t errorCode=0; + blockProperties_t blockProperties; + + /* Frame Header */ + if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + magicNumber = ZSTD_readBE32(src); + if (magicNumber != 
ZSTD_magicNumber) return ERROR(prefix_unknown); + ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; + + /* Loop on each block */ + while (1) + { + size_t blockSize = ZSTDv01_getcBlockSize(ip, iend-ip, &blockProperties); + if (ZSTDv01_isError(blockSize)) return blockSize; + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (blockSize > remainingSize) return ERROR(srcSize_wrong); + + switch(blockProperties.blockType) + { + case bt_compressed: + errorCode = ZSTD_decompressBlock(ctx, op, oend-op, ip, blockSize); + break; + case bt_raw : + errorCode = ZSTD_copyUncompressedBlock(op, oend-op, ip, blockSize); + break; + case bt_rle : + return ERROR(GENERIC); /* not yet supported */ + break; + case bt_end : + /* end of frame */ + if (remainingSize) return ERROR(srcSize_wrong); + break; + default: + return ERROR(GENERIC); + } + if (blockSize == 0) break; /* bt_end */ + + if (ZSTDv01_isError(errorCode)) return errorCode; + op += errorCode; + ip += blockSize; + remainingSize -= blockSize; + } + + return op-ostart; +} + +size_t ZSTDv01_decompress(void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + dctx_t ctx; + ctx.base = dst; + return ZSTDv01_decompressDCtx(&ctx, dst, maxDstSize, src, srcSize); +} + +/* ZSTD_errorFrameSizeInfoLegacy() : + assumes `cSize` and `dBound` are _not_ NULL */ +static void ZSTD_errorFrameSizeInfoLegacy(size_t* cSize, unsigned long long* dBound, size_t ret) +{ + *cSize = ret; + *dBound = ZSTD_CONTENTSIZE_ERROR; +} + +void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, size_t* cSize, unsigned long long* dBound) +{ + const BYTE* ip = (const BYTE*)src; + size_t remainingSize = srcSize; + size_t nbBlocks = 0; + U32 magicNumber; + blockProperties_t blockProperties; + + /* Frame Header */ + if (srcSize < ZSTD_frameHeaderSize+ZSTD_blockHeaderSize) { + ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); + return; + } + magicNumber = ZSTD_readBE32(src); + if (magicNumber 
!= ZSTD_magicNumber) { + ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(prefix_unknown)); + return; + } + ip += ZSTD_frameHeaderSize; remainingSize -= ZSTD_frameHeaderSize; + + /* Loop on each block */ + while (1) + { + size_t blockSize = ZSTDv01_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTDv01_isError(blockSize)) { + ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, blockSize); + return; + } + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (blockSize > remainingSize) { + ZSTD_errorFrameSizeInfoLegacy(cSize, dBound, ERROR(srcSize_wrong)); + return; + } + + if (blockSize == 0) break; /* bt_end */ + + ip += blockSize; + remainingSize -= blockSize; + nbBlocks++; + } + + *cSize = ip - (const BYTE*)src; + *dBound = nbBlocks * BLOCKSIZE; +} + +/******************************* +* Streaming Decompression API +*******************************/ + +size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx) +{ + dctx->expected = ZSTD_frameHeaderSize; + dctx->phase = 0; + dctx->previousDstEnd = NULL; + dctx->base = NULL; + return 0; +} + +ZSTDv01_Dctx* ZSTDv01_createDCtx(void) +{ + ZSTDv01_Dctx* dctx = (ZSTDv01_Dctx*)malloc(sizeof(ZSTDv01_Dctx)); + if (dctx==NULL) return NULL; + ZSTDv01_resetDCtx(dctx); + return dctx; +} + +size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx) +{ + free(dctx); + return 0; +} + +size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx) +{ + return ((dctx_t*)dctx)->expected; +} + +size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize) +{ + dctx_t* ctx = (dctx_t*)dctx; + + /* Sanity check */ + if (srcSize != ctx->expected) return ERROR(srcSize_wrong); + if (dst != ctx->previousDstEnd) /* not contiguous */ + ctx->base = dst; + + /* Decompress : frame header */ + if (ctx->phase == 0) + { + /* Check frame magic header */ + U32 magicNumber = ZSTD_readBE32(src); + if (magicNumber != ZSTD_magicNumber) return ERROR(prefix_unknown); + ctx->phase = 1; + ctx->expected = 
ZSTD_blockHeaderSize; + return 0; + } + + /* Decompress : block header */ + if (ctx->phase == 1) + { + blockProperties_t bp; + size_t blockSize = ZSTDv01_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTDv01_isError(blockSize)) return blockSize; + if (bp.blockType == bt_end) + { + ctx->expected = 0; + ctx->phase = 0; + } + else + { + ctx->expected = blockSize; + ctx->bType = bp.blockType; + ctx->phase = 2; + } + + return 0; + } + + /* Decompress : block content */ + { + size_t rSize; + switch(ctx->bType) + { + case bt_compressed: + rSize = ZSTD_decompressBlock(ctx, dst, maxDstSize, src, srcSize); + break; + case bt_raw : + rSize = ZSTD_copyUncompressedBlock(dst, maxDstSize, src, srcSize); + break; + case bt_rle : + return ERROR(GENERIC); /* not yet handled */ + break; + case bt_end : /* should never happen (filtered at phase 1) */ + rSize = 0; + break; + default: + return ERROR(GENERIC); + } + ctx->phase = 1; + ctx->expected = ZSTD_blockHeaderSize; + ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); + return rSize; + } + +} diff --git a/src/borg/algorithms/zstd/lib/legacy/zstd_v01.h b/src/borg/algorithms/zstd/lib/legacy/zstd_v01.h new file mode 100644 index 0000000000..7910351726 --- /dev/null +++ b/src/borg/algorithms/zstd/lib/legacy/zstd_v01.h @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_V01_H_28739879432 +#define ZSTD_V01_H_28739879432 + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ************************************* +* Includes +***************************************/ +#include /* size_t */ + + +/* ************************************* +* Simple one-step function +***************************************/ +/** +ZSTDv01_decompress() : decompress ZSTD frames compliant with v0.1.x format + compressedSize : is the exact source size + maxOriginalSize : is the size of the 'dst' buffer, which must be already allocated. + It must be equal or larger than originalSize, otherwise decompression will fail. + return : the number of bytes decompressed into destination buffer (originalSize) + or an errorCode if it fails (which can be tested using ZSTDv01_isError()) +*/ +size_t ZSTDv01_decompress( void* dst, size_t maxOriginalSize, + const void* src, size_t compressedSize); + + /** + ZSTDv01_findFrameSizeInfoLegacy() : get the source length and decompressed bound of a ZSTD frame compliant with v0.1.x format + srcSize : The size of the 'src' buffer, at least as large as the frame pointed to by 'src' + cSize (output parameter) : the number of bytes that would be read to decompress this frame + or an error code if it fails (which can be tested using ZSTDv01_isError()) + dBound (output parameter) : an upper-bound for the decompressed size of the data in the frame + or ZSTD_CONTENTSIZE_ERROR if an error occurs + + note : assumes `cSize` and `dBound` are _not_ NULL. 
+ */ +void ZSTDv01_findFrameSizeInfoLegacy(const void *src, size_t srcSize, + size_t* cSize, unsigned long long* dBound); + +/** +ZSTDv01_isError() : tells if the result of ZSTDv01_decompress() is an error +*/ +unsigned ZSTDv01_isError(size_t code); + + +/* ************************************* +* Advanced functions +***************************************/ +typedef struct ZSTDv01_Dctx_s ZSTDv01_Dctx; +ZSTDv01_Dctx* ZSTDv01_createDCtx(void); +size_t ZSTDv01_freeDCtx(ZSTDv01_Dctx* dctx); + +size_t ZSTDv01_decompressDCtx(void* ctx, + void* dst, size_t maxOriginalSize, + const void* src, size_t compressedSize); + +/* ************************************* +* Streaming functions +***************************************/ +size_t ZSTDv01_resetDCtx(ZSTDv01_Dctx* dctx); + +size_t ZSTDv01_nextSrcSizeToDecompress(ZSTDv01_Dctx* dctx); +size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize); +/** + Use above functions alternatively. + ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). + ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. + Result is the number of bytes regenerated within 'dst'. + It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. 
+*/ + +/* ************************************* +* Prefix - version detection +***************************************/ +#define ZSTDv01_magicNumber 0xFD2FB51E /* Big Endian version */ +#define ZSTDv01_magicNumberLE 0x1EB52FFD /* Little Endian version */ + + +#if defined (__cplusplus) +} +#endif + +#endif /* ZSTD_V01_H_28739879432 */ diff --git a/src/borg/algorithms/zstd/lib/legacy/zstd_v02.c b/src/borg/algorithms/zstd/lib/legacy/zstd_v02.c new file mode 100644 index 0000000000..32d45a6d4b --- /dev/null +++ b/src/borg/algorithms/zstd/lib/legacy/zstd_v02.c @@ -0,0 +1,3518 @@ +/* + * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + + +#include /* size_t, ptrdiff_t */ +#include "zstd_v02.h" +#include "../common/error_private.h" + + +/****************************************** +* Compiler-specific +******************************************/ +#if defined(_MSC_VER) /* Visual Studio */ +# include /* _byteswap_ulong */ +# include /* _byteswap_* */ +#endif + + +/* ****************************************************************** + mem.h + low-level memory access routines + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#ifndef MEM_H_MODULE +#define MEM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************** +* Includes +******************************************/ +#include /* size_t, ptrdiff_t */ +#include /* memcpy */ + + +/****************************************** +* Compiler-specific +******************************************/ +#if defined(__GNUC__) +# define MEM_STATIC static __attribute__((unused)) +#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define MEM_STATIC static inline +#elif defined(_MSC_VER) +# define MEM_STATIC static __inline +#else +# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the 
relevant warning */ +#endif + + +/**************************************************************** +* Basic Types +*****************************************************************/ +#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# include + typedef uint8_t BYTE; + typedef uint16_t U16; + typedef int16_t S16; + typedef uint32_t U32; + typedef int32_t S32; + typedef uint64_t U64; + typedef int64_t S64; +#else + typedef unsigned char BYTE; + typedef unsigned short U16; + typedef signed short S16; + typedef unsigned int U32; + typedef signed int S32; + typedef unsigned long long U64; + typedef signed long long S64; +#endif + + +/**************************************************************** +* Memory I/O +*****************************************************************/ +/* MEM_FORCE_MEMORY_ACCESS + * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. + * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. + * The below switch allow to select different access method for improved performance. + * Method 0 (default) : use `memcpy()`. Safe and portable. + * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). + * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. + * Method 2 : direct access. This method is portable but violate C standard. + * It can generate buggy code on targets generating assembly depending on alignment. + * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) + * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details. 
+ * Prefer these methods in priority order (0 > 1 > 2) + */ +#ifndef MEM_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ +# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) ) +# define MEM_FORCE_MEMORY_ACCESS 2 +# elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ + (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) +# define MEM_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +MEM_STATIC unsigned MEM_32bits(void) { return sizeof(void*)==4; } +MEM_STATIC unsigned MEM_64bits(void) { return sizeof(void*)==8; } + +MEM_STATIC unsigned MEM_isLittleEndian(void) +{ + const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ + return one.c[0]; +} + +#if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) + +/* violates C standard on structure alignment. 
+Only use if no other choice to achieve best performance on target platform */ +MEM_STATIC U16 MEM_read16(const void* memPtr) { return *(const U16*) memPtr; } +MEM_STATIC U32 MEM_read32(const void* memPtr) { return *(const U32*) memPtr; } +MEM_STATIC U64 MEM_read64(const void* memPtr) { return *(const U64*) memPtr; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; } + +#elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1) + +/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ +/* currently only defined for gcc and icc */ +typedef union { U16 u16; U32 u32; U64 u64; } __attribute__((packed)) unalign; + +MEM_STATIC U16 MEM_read16(const void* ptr) { return ((const unalign*)ptr)->u16; } +MEM_STATIC U32 MEM_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } +MEM_STATIC U64 MEM_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; } + +#else + +/* default method, safe and standard. 
+ can sometimes prove slower */ + +MEM_STATIC U16 MEM_read16(const void* memPtr) +{ + U16 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U32 MEM_read32(const void* memPtr) +{ + U32 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC U64 MEM_read64(const void* memPtr) +{ + U64 val; memcpy(&val, memPtr, sizeof(val)); return val; +} + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) +{ + memcpy(memPtr, &value, sizeof(value)); +} + +#endif /* MEM_FORCE_MEMORY_ACCESS */ + + +MEM_STATIC U16 MEM_readLE16(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read16(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U16)(p[0] + (p[1]<<8)); + } +} + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) +{ + if (MEM_isLittleEndian()) + { + MEM_write16(memPtr, val); + } + else + { + BYTE* p = (BYTE*)memPtr; + p[0] = (BYTE)val; + p[1] = (BYTE)(val>>8); + } +} + +MEM_STATIC U32 MEM_readLE24(const void* memPtr) +{ + return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); +} + +MEM_STATIC U32 MEM_readLE32(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read32(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U32)((U32)p[0] + ((U32)p[1]<<8) + ((U32)p[2]<<16) + ((U32)p[3]<<24)); + } +} + + +MEM_STATIC U64 MEM_readLE64(const void* memPtr) +{ + if (MEM_isLittleEndian()) + return MEM_read64(memPtr); + else + { + const BYTE* p = (const BYTE*)memPtr; + return (U64)((U64)p[0] + ((U64)p[1]<<8) + ((U64)p[2]<<16) + ((U64)p[3]<<24) + + ((U64)p[4]<<32) + ((U64)p[5]<<40) + ((U64)p[6]<<48) + ((U64)p[7]<<56)); + } +} + + +MEM_STATIC size_t MEM_readLEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* MEM_H_MODULE */ + + +/* ****************************************************************** + bitstream + Part of NewGen Entropy library + header file (to 
include) + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#ifndef BITSTREAM_H_MODULE +#define BITSTREAM_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/* +* This API consists of small unitary functions, which highly benefit from being inlined. +* Since link-time-optimization is not available for all compilers, +* these functions are defined into a .h to be included. 
+*/ + + +/********************************************** +* bitStream decompression API (read backward) +**********************************************/ +typedef struct +{ + size_t bitContainer; + unsigned bitsConsumed; + const char* ptr; + const char* start; +} BIT_DStream_t; + +typedef enum { BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ + /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */ + +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + + +/****************************************** +* unsafe API +******************************************/ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); +/* faster, but works only if nbBits >= 1 */ + + + +/**************************************************************** +* Helper functions +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ + return __builtin_clz (val) ^ 31; +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + unsigned r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; + return r; +# endif +} + + + +/********************************************************** +* bitStream decoding 
+**********************************************************/ + +/*!BIT_initDStream +* Initialize a BIT_DStream_t. +* @bitD : a pointer to an already allocated BIT_DStream_t structure +* @srcBuffer must point at the beginning of a bitStream +* @srcSize must be the exact size of the bitStream +* @result : size of stream (== srcSize) or an errorCode if a problem is detected +*/ +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + + if (srcSize >= sizeof(size_t)) /* normal case */ + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(size_t); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ + bitD->bitsConsumed = 8 - BIT_highbit32(contain32); + } + else + { + U32 contain32; + bitD->start = (const char*)srcBuffer; + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[6]) << (sizeof(size_t)*8 - 16); + /* fallthrough */ + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[5]) << (sizeof(size_t)*8 - 24); + /* fallthrough */ + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[4]) << (sizeof(size_t)*8 - 32); + /* fallthrough */ + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[3]) << 24; + /* fallthrough */ + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[2]) << 16; + /* fallthrough */ + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(bitD->start))[1]) << 8; + /* fallthrough */ + default:; + } + contain32 = ((const BYTE*)srcBuffer)[srcSize-1]; + if (contain32 == 0) return ERROR(GENERIC); /* endMark not present */ + bitD->bitsConsumed = 8 - BIT_highbit32(contain32); + 
bitD->bitsConsumed += (U32)(sizeof(size_t) - srcSize)*8; + } + + return srcSize; +} + +MEM_STATIC size_t BIT_lookBits(BIT_DStream_t* bitD, U32 nbBits) +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); +} + +/*! BIT_lookBitsFast : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_lookBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + const U32 bitMask = sizeof(bitD->bitContainer)*8 - 1; + return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); +} + +MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*!BIT_readBitsFast : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t value = BIT_lookBitsFast(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should never happen */ + return BIT_DStream_overflow; + + if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) + { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; + } + if (bitD->ptr == bitD->start) + { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } + { + U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) + { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = BIT_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + 
bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ + return result; + } +} + +/*! BIT_endOfDStream +* @return Tells if DStream has reached its exact end +*/ +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) +{ + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); +} + +#if defined (__cplusplus) +} +#endif + +#endif /* BITSTREAM_H_MODULE */ +/* ****************************************************************** + Error codes and messages + Copyright (C) 2013-2015, Yann Collet + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#ifndef ERROR_H_MODULE +#define ERROR_H_MODULE + +#if defined (__cplusplus) +extern "C" { +#endif + + +/****************************************** +* Compiler-specific +******************************************/ +#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +# define ERR_STATIC static inline +#elif defined(_MSC_VER) +# define ERR_STATIC static __inline +#elif defined(__GNUC__) +# define ERR_STATIC static __attribute__((unused)) +#else +# define ERR_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */ +#endif + + +/****************************************** +* Error Management +******************************************/ +#define PREFIX(name) ZSTD_error_##name + +#define ERROR(name) (size_t)-PREFIX(name) + +#define ERROR_LIST(ITEM) \ + ITEM(PREFIX(No_Error)) ITEM(PREFIX(GENERIC)) \ + ITEM(PREFIX(dstSize_tooSmall)) ITEM(PREFIX(srcSize_wrong)) \ + ITEM(PREFIX(prefix_unknown)) ITEM(PREFIX(corruption_detected)) \ + ITEM(PREFIX(tableLog_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooLarge)) ITEM(PREFIX(maxSymbolValue_tooSmall)) \ + ITEM(PREFIX(maxCode)) + +#define ERROR_GENERATE_ENUM(ENUM) ENUM, +typedef enum { ERROR_LIST(ERROR_GENERATE_ENUM) } ERR_codes; /* enum is exposed, to detect & handle specific errors; compare function result to -enum value */ + +#define ERROR_CONVERTTOSTRING(STRING) #STRING, +#define ERROR_GENERATE_STRING(EXPR) ERROR_CONVERTTOSTRING(EXPR) +static const char* ERR_strings[] = { ERROR_LIST(ERROR_GENERATE_STRING) }; + +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } + +ERR_STATIC const char* ERR_getErrorName(size_t code) +{ + static const char* codeError = "Unspecified 
error code"; + if (ERR_isError(code)) return ERR_strings[-(int)(code)]; + return codeError; +} + + +#if defined (__cplusplus) +} +#endif + +#endif /* ERROR_H_MODULE */ +/* +Constructor and Destructor of type FSE_CTable + Note that its size depends on 'tableLog' and 'maxSymbolValue' */ +typedef unsigned FSE_CTable; /* don't allocate that. It's just a way to be more restrictive than void* */ +typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */ + + +/* ****************************************************************** + FSE : Finite State Entropy coder + header file for static linking (only) + Copyright (C) 2013-2015, Yann Collet + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ +#if defined (__cplusplus) +extern "C" { +#endif + + +/****************************************** +* Static allocation +******************************************/ +/* FSE buffer bounds */ +#define FSE_NCOUNTBOUND 512 +#define FSE_BLOCKBOUND(size) (size + (size>>7)) +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* You can statically allocate FSE CTable/DTable as a table of unsigned using below macro */ +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= 1 (otherwise, result will be corrupted) */ + + +/****************************************** +* Implementation of inline functions +******************************************/ + +/* decompression */ + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) +{ + FSE_DTableHeader DTableH; + memcpy(&DTableH, dt, 
sizeof(DTableH)); + DStatePtr->state = BIT_readBits(bitD, DTableH.tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = BIT_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + const FSE_decode_t DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + const U32 nbBits = DInfo.nbBits; + BYTE symbol = DInfo.symbol; + size_t lowBits = BIT_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + +#if defined (__cplusplus) +} +#endif +/* ****************************************************************** + Huff0 : Huffman coder, part of New Generation Entropy library + header file for static linking (only) + Copyright (C) 2013-2015, Yann Collet + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +#if defined (__cplusplus) +extern "C" { +#endif + +/****************************************** +* Static allocation macros +******************************************/ +/* Huff0 buffer bounds */ +#define HUF_CTABLEBOUND 129 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* static allocation of Huff0's DTable */ +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1< /* size_t */ + + +/* ************************************* +* Version +***************************************/ +#define ZSTD_VERSION_MAJOR 0 /* for breaking interface changes */ +#define ZSTD_VERSION_MINOR 2 /* for new (non-breaking) interface capabilities */ +#define ZSTD_VERSION_RELEASE 2 /* for tweaks, bug-fixes, or development */ +#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR 
*100 + ZSTD_VERSION_RELEASE) + + +/* ************************************* +* Advanced functions +***************************************/ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; /* incomplete type */ + +#if defined (__cplusplus) +} +#endif +/* + zstd - standard compression library + Header File for static linking only + Copyright (C) 2014-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - zstd source repository : https://github.com/Cyan4973/zstd + - ztsd public forum : https://groups.google.com/forum/#!forum/lz4c +*/ + +/* The objects defined into this file should be considered experimental. 
+ * They are not labelled stable, as their prototype may change in the future. + * You can use them for tests, provide feedback, or if you can endure risk of future changes. + */ + +#if defined (__cplusplus) +extern "C" { +#endif + +/* ************************************* +* Streaming functions +***************************************/ + +typedef struct ZSTD_DCtx_s ZSTD_DCtx; + +/* + Use above functions alternatively. + ZSTD_nextSrcSizeToDecompress() tells how much bytes to provide as 'srcSize' to ZSTD_decompressContinue(). + ZSTD_decompressContinue() will use previous data blocks to improve compression if they are located prior to current block. + Result is the number of bytes regenerated within 'dst'. + It can be zero, which is not an error; it just means ZSTD_decompressContinue() has decoded some header. +*/ + +/* ************************************* +* Prefix - version detection +***************************************/ +#define ZSTD_magicNumber 0xFD2FB522 /* v0.2 (current)*/ + + +#if defined (__cplusplus) +} +#endif +/* ****************************************************************** + FSE : Finite State Entropy coder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +#ifndef FSE_COMMONDEFS_ONLY + +/**************************************************************** +* Tuning parameters +****************************************************************/ +/* MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#define FSE_MAX_MEMORY_USAGE 14 +#define FSE_DEFAULT_MEMORY_USAGE 13 + +/* FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#define FSE_MAX_SYMBOL_VALUE 255 + + +/**************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION + + +/**************************************************************** +* Byte symbol type +****************************************************************/ +#endif /* !FSE_COMMONDEFS_ONLY */ + + +/**************************************************************** +* Compiler specifics +****************************************************************/ +#ifdef _MSC_VER /* Visual Studio */ +# define FORCE_INLINE static __forceinline +# include /* For Visual 2005 */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */ +#else +# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */ +# ifdef __GNUC__ +# define FORCE_INLINE static inline __attribute__((always_inline)) +# else +# define FORCE_INLINE static inline +# endif +# else +# define FORCE_INLINE static +# endif /* __STDC_VERSION__ */ +#endif + + +/**************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf (debug) */ + +/**************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +#error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + + +/**************************************************************** +* Error Management 
+****************************************************************/ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/**************************************************************** +* Complex types +****************************************************************/ +typedef U32 DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)]; + + +/**************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +#define FSE_DECODE_TYPE FSE_decode_t + +static U32 FSE_tableStep(U32 tableSize) { return (tableSize>>1) + (tableSize>>3) + 3; } + +static size_t FSE_buildDTable +(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* ptr = dt+1; + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*)ptr; + FSE_DTableHeader DTableH; + const U32 tableSize = 1 << tableLog; + const U32 tableMask = tableSize-1; + const U32 step = FSE_tableStep(tableSize); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + U32 position = 0; + U32 highThreshold = tableSize-1; + const S16 largeLimit= (S16)(1 << (tableLog-1)); + U32 noLarge = 1; + U32 s; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ + DTableH.tableLog = 
(U16)tableLog; + for (s=0; s<=maxSymbolValue; s++) + { + if (normalizedCounter[s]==-1) + { + tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s; + symbolNext[s] = 1; + } + else + { + if (normalizedCounter[s] >= largeLimit) noLarge=0; + symbolNext[s] = normalizedCounter[s]; + } + } + + /* Spread symbols */ + for (s=0; s<=maxSymbolValue; s++) + { + int i; + for (i=0; i highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } + } + + if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + + /* Build Decoding table */ + { + U32 i; + for (i=0; i FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<1) && (charnum<=*maxSVPtr)) + { + if (previous0) + { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) + { + n0+=24; + if (ip < iend-5) + { + ip+=2; + bitStream = MEM_readLE32(ip) >> bitCount; + } + else + { + bitStream >>= 16; + bitCount+=16; + } + } + while ((bitStream & 3) == 3) + { + n0+=3; + bitStream>>=2; + bitCount+=2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + bitStream = MEM_readLE32(ip) >> bitCount; + } + else + bitStream >>= 2; + } + { + const short max = (short)((2*threshold-1)-remaining); + short count; + + if ((bitStream & (threshold-1)) < (U32)max) + { + count = (short)(bitStream & (threshold-1)); + bitCount += nbBits-1; + } + else + { + count = (short)(bitStream & (2*threshold-1)); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= FSE_abs(count); + normalizedCounter[charnum++] = count; + previous0 = !count; + while (remaining < threshold) + { + nbBits--; + threshold >>= 1; + } + + { + 
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) + { + ip += bitCount>>3; + bitCount &= 7; + } + else + { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> (bitCount & 31); + } + } + } + if (remaining != 1) return ERROR(GENERIC); + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + if ((size_t)(ip-istart) > hbSize) return ERROR(srcSize_wrong); + return ip-istart; +} + + +/********************************************************* +* Decompression (Byte symbols) +*********************************************************/ +static size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const cell = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + + DTableH->tableLog = 0; + DTableH->fastMode = 0; + + cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +static size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + FSE_decode_t* const dinfo = (FSE_decode_t*)(ptr) + 1; /* because dt is unsigned */ + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSymbolValue = tableMask; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s<=maxSymbolValue; s++) + { + dinfo[s].newState = 0; + dinfo[s].symbol = (BYTE)s; + dinfo[s].nbBits = (BYTE)nbBits; + } + + return 0; +} + +FORCE_INLINE size_t FSE_decompress_usingDTable_generic( + void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt, const unsigned fast) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const omax = op + maxDstSize; + BYTE* const olimit = omax-3; + + BIT_DStream_t bitD; + 
FSE_DState_t state1; + FSE_DState_t state2; + size_t errorCode; + + /* Init */ + errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); /* replaced last arg by maxCompressed Size */ + if (FSE_isError(errorCode)) return errorCode; + + FSE_initDState(&state1, &bitD, dt); + FSE_initDState(&state2, &bitD, dt); + +#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD) + + /* 4 symbols per loop */ + for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) && (op sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ + while (1) + { + if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state1))) ) + break; + + *op++ = FSE_GETSYMBOL(&state1); + + if ( (BIT_reloadDStream(&bitD)>BIT_DStream_completed) || (op==omax) || (BIT_endOfDStream(&bitD) && (fast || FSE_endOfDState(&state2))) ) + break; + + *op++ = FSE_GETSYMBOL(&state2); + } + + /* end ? 
*/ + if (BIT_endOfDStream(&bitD) && FSE_endOfDState(&state1) && FSE_endOfDState(&state2)) + return op-ostart; + + if (op==omax) return ERROR(dstSize_tooSmall); /* dst buffer is full, but cSrc unfinished */ + + return ERROR(corruption_detected); +} + + +static size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + FSE_DTableHeader DTableH; + memcpy(&DTableH, dt, sizeof(DTableH)); + + /* select fast mode (static) */ + if (DTableH.fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +static size_t FSE_decompress(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + DTable_max_t dt; /* Static analyzer seems unable to understand this table will be properly initialized later */ + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + size_t errorCode; + + if (cSrcSize<2) return ERROR(srcSize_wrong); /* too small input size */ + + /* normal FSE decoding mode */ + errorCode = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(errorCode)) return errorCode; + if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size */ + ip += errorCode; + cSrcSize -= errorCode; + + errorCode = FSE_buildDTable (dt, counting, maxSymbolValue, tableLog); + if (FSE_isError(errorCode)) return errorCode; + + /* always return, even if it is an error code */ + return FSE_decompress_usingDTable (dst, maxDstSize, ip, cSrcSize, dt); +} + + + +#endif /* FSE_COMMONDEFS_ONLY */ +/* ****************************************************************** + Huff0 : Huffman coder, part of New Generation Entropy library + Copyright (C) 2013-2015, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE+Huff0 source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/**************************************************************** +* Compiler specifics +****************************************************************/ +#if defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) +/* inline is defined */ +#elif defined(_MSC_VER) +# define inline __inline +#else +# define inline /* disable inline */ +#endif + + +#ifdef _MSC_VER /* Visual Studio */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + + +/**************************************************************** +* Includes +****************************************************************/ +#include /* malloc, free, qsort */ +#include /* memcpy, memset */ +#include /* printf (debug) */ + +/**************************************************************** +* Error Management +****************************************************************/ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/****************************************** +* Helper functions +******************************************/ +static unsigned HUF_isError(size_t code) { return ERR_isError(code); } + +#define HUF_ABSOLUTEMAX_TABLELOG 16 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#define HUF_MAX_TABLELOG 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_DEFAULT_TABLELOG HUF_MAX_TABLELOG /* tableLog by default, when not specified */ +#define HUF_MAX_SYMBOL_VALUE 255 +#if (HUF_MAX_TABLELOG > HUF_ABSOLUTEMAX_TABLELOG) +# error "HUF_MAX_TABLELOG is too large !" 
+#endif + + + +/********************************************************* +* Huff0 : Huffman block decompression +*********************************************************/ +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ + +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ + +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; + +/*! HUF_readStats + Read compact Huffman tree, saved by HUF_writeCTable + @huffWeight : destination buffer + @return : size read from `src` +*/ +static size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize) +{ + U32 weightTotal; + U32 tableLog; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + U32 n; + + if (!srcSize) return ERROR(srcSize_wrong); + iSize = ip[0]; + //memset(huffWeight, 0, hwSize); /* is not necessary, even though some analyzer complain ... */ + + if (iSize >= 128) /* special header */ + { + if (iSize >= (242)) /* RLE */ + { + static int l[14] = { 1, 2, 3, 4, 7, 8, 15, 16, 31, 32, 63, 64, 127, 128 }; + oSize = l[iSize-242]; + memset(huffWeight, 1, hwSize); + iSize = 0; + } + else /* Incompressible */ + { + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + if (oSize >= hwSize) return ERROR(corruption_detected); + ip += 1; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; + } + } + } + else /* header compressed with FSE (normal case) */ + { + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + oSize = FSE_decompress(huffWeight, hwSize-1, ip+1, iSize); /* max (hwSize-1) values decoded, as last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankStats, 0, (HUF_ABSOLUTEMAX_TABLELOG + 1) * sizeof(U32)); + weightTotal = 0; + for (n=0; n= HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); + 
rankStats[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } + if (weightTotal == 0) return ERROR(corruption_detected); + + /* get last non-null symbol weight (implied, total must be 2^n) */ + tableLog = BIT_highbit32(weightTotal) + 1; + if (tableLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(corruption_detected); + { + U32 total = 1 << tableLog; + U32 rest = total - weightTotal; + U32 verif = 1 << BIT_highbit32(rest); + U32 lastWeight = BIT_highbit32(rest) + 1; + if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankStats[lastWeight]++; + } + + /* check tree construction validity */ + if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ + + /* results */ + *nbSymbolsPtr = (U32)(oSize+1); + *tableLogPtr = tableLog; + return iSize+1; +} + + +/**************************/ +/* single-symbol decoding */ +/**************************/ + +static size_t HUF_readDTableX2 (U16* DTable, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_MAX_SYMBOL_VALUE + 1]; + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; /* large enough for values from 0 to 16 */ + U32 tableLog = 0; + const BYTE* ip = (const BYTE*) src; + size_t iSize = ip[0]; + U32 nbSymbols = 0; + U32 n; + U32 nextRankStart; + void* ptr = DTable+1; + HUF_DEltX2* const dt = (HUF_DEltX2*)ptr; + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U16)); /* if compilation fails here, assertion is false */ + //memset(huffWeight, 0, sizeof(huffWeight)); /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(huffWeight, HUF_MAX_SYMBOL_VALUE + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > DTable[0]) return ERROR(tableLog_tooLarge); /* DTable is too small */ + DTable[0] = (U16)tableLog; /* maybe should separate sizeof DTable, as allocated, from used size of DTable, in case of DTable re-use */ + + /* Prepare ranks */ + nextRankStart = 0; + for (n=1; n<=tableLog; n++) + { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } + + /* fill DTable */ + for (n=0; n> 1; + U32 i; + HUF_DEltX2 D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); + for (i = rankVal[w]; i < rankVal[w] + length; i++) + dt[i] = D; + rankVal[w] += length; + } + + return iSize; +} + +static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) +{ + const size_t val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + const BYTE c = dt[val].byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; +} + +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +static inline size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 4 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) + { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + + /* closer to the end */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd)) 
+ HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + /* no more data to retrieve from bitstream, hence no need to reload */ + while (p < pEnd) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + return pEnd-pStart; +} + + +static size_t HUF_decompress4X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const U16* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { + const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + + const void* ptr = DTable; + const HUF_DEltX2* const dt = ((const HUF_DEltX2*)ptr) +1; + const U32 dtLog = DTable[0]; + size_t errorCode; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + const size_t length1 = MEM_readLE16(istart); + const size_t length2 = MEM_readLE16(istart+2); + const size_t length3 = MEM_readLE16(istart+4); + size_t length4; + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + + length4 = cSrcSize - (length1 + length2 + length3 + 6); + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD4, istart4, 
length4); + if (HUF_isError(errorCode)) return errorCode; + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) + { + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endSignal) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; + } +} + + +static size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_MAX_TABLELOG); + const BYTE* ip = (const 
BYTE*) cSrc; + size_t errorCode; + + errorCode = HUF_readDTableX2 (DTable, cSrc, cSrcSize); + if (HUF_isError(errorCode)) return errorCode; + if (errorCode >= cSrcSize) return ERROR(srcSize_wrong); + ip += errorCode; + cSrcSize -= errorCode; + + return HUF_decompress4X2_usingDTable (dst, dstSize, ip, cSrcSize, DTable); +} + + +/***************************/ +/* double-symbols decoding */ +/***************************/ + +static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, + const U32* rankValOrigin, const int minWeight, + const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, + U32 nbBitsBaseline, U16 baseSeq) +{ + HUF_DEltX4 DElt; + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; + U32 s; + + /* get pre-calculated rankVal */ + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill skipped values */ + if (minWeight>1) + { + U32 i, skipSize = rankVal[minWeight]; + MEM_writeLE16(&(DElt.sequence), baseSeq); + DElt.nbBits = (BYTE)(consumed); + DElt.length = 1; + for (i = 0; i < skipSize; i++) + DTable[i] = DElt; + } + + /* fill DTable */ + for (s=0; s= 1 */ + + rankVal[weight] += length; + } +} + +typedef U32 rankVal_t[HUF_ABSOLUTEMAX_TABLELOG][HUF_ABSOLUTEMAX_TABLELOG + 1]; + +static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, + const sortedSymbol_t* sortedList, const U32 sortedListSize, + const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32 nbBitsBaseline) +{ + U32 rankVal[HUF_ABSOLUTEMAX_TABLELOG + 1]; + const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ + const U32 minBits = nbBitsBaseline - maxWeight; + U32 s; + + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill DTable */ + for (s=0; s= minBits) /* enough room for a second symbol */ + { + U32 sortedRank; + int minWeight = nbBits + scaleLog; + if (minWeight < 1) minWeight = 1; + sortedRank = rankStart[minWeight]; + HUF_fillDTableX4Level2(DTable+start, 
targetLog-nbBits, nbBits, + rankValOrigin[nbBits], minWeight, + sortedList+sortedRank, sortedListSize-sortedRank, + nbBitsBaseline, symbol); + } + else + { + U32 i; + const U32 end = start + length; + HUF_DEltX4 DElt; + + MEM_writeLE16(&(DElt.sequence), symbol); + DElt.nbBits = (BYTE)(nbBits); + DElt.length = 1; + for (i = start; i < end; i++) + DTable[i] = DElt; + } + rankVal[weight] += length; + } +} + +static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize) +{ + BYTE weightList[HUF_MAX_SYMBOL_VALUE + 1]; + sortedSymbol_t sortedSymbol[HUF_MAX_SYMBOL_VALUE + 1]; + U32 rankStats[HUF_ABSOLUTEMAX_TABLELOG + 1] = { 0 }; + U32 rankStart0[HUF_ABSOLUTEMAX_TABLELOG + 2] = { 0 }; + U32* const rankStart = rankStart0+1; + rankVal_t rankVal; + U32 tableLog, maxW, sizeOfSort, nbSymbols; + const U32 memLog = DTable[0]; + const BYTE* ip = (const BYTE*) src; + size_t iSize = ip[0]; + void* ptr = DTable; + HUF_DEltX4* const dt = ((HUF_DEltX4*)ptr) + 1; + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(U32)); /* if compilation fails here, assertion is false */ + if (memLog > HUF_ABSOLUTEMAX_TABLELOG) return ERROR(tableLog_tooLarge); + //memset(weightList, 0, sizeof(weightList)); /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(weightList, HUF_MAX_SYMBOL_VALUE + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > memLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + + /* find maxWeight */ + for (maxW = tableLog; rankStats[maxW]==0; maxW--) + {if (!maxW) return ERROR(GENERIC); } /* necessarily finds a solution before maxW==0 */ + + /* Get start index of each weight */ + { + U32 w, nextRankStart = 0; + for (w=1; w<=maxW; w++) + { + U32 current = nextRankStart; + nextRankStart += rankStats[w]; + rankStart[w] = current; + } + rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ + sizeOfSort = nextRankStart; + } + + /* sort symbols by weight */ + { + U32 s; + for (s=0; s> consumed; + } + } + } + + HUF_fillDTableX4(dt, memLog, + sortedSymbol, sizeOfSort, + rankStart0, rankVal, maxW, + tableLog+1); + + return iSize; +} + + +static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + const size_t val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); + else + { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) + { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ + } + } + return 1; +} + + +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_MAX_TABLELOG<=12)) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +static inline size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd-7)) + { + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + } + + /* closer to the end */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-2)) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + + + +static size_t HUF_decompress4X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const U32* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { + const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + + const void* ptr = DTable; + const HUF_DEltX4* const dt = ((const HUF_DEltX4*)ptr) +1; + const U32 dtLog = DTable[0]; + size_t errorCode; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + const size_t length1 = MEM_readLE16(istart); + const size_t length2 = 
MEM_readLE16(istart+2); + const size_t length3 = MEM_readLE16(istart+4); + size_t length4; + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + + length4 = cSrcSize - (length1 + length2 + length3 + 6); + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; + errorCode = BIT_initDStream(&bitD4, istart4, length4); + if (HUF_isError(errorCode)) return errorCode; + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) + { + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); + 
HUF_DECODE_SYMBOLX4_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); + + /* check */ + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endSignal) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; + } +} + + +static size_t HUF_decompress4X4 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_MAX_TABLELOG); + const BYTE* ip = (const BYTE*) cSrc; + + size_t hSize = HUF_readDTableX4 (DTable, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; + cSrcSize -= hSize; + + return HUF_decompress4X4_usingDTable (dst, dstSize, ip, cSrcSize, DTable); +} + + +/**********************************/ +/* quad-symbol decoding */ +/**********************************/ +typedef struct { BYTE nbBits; BYTE nbBytes; } HUF_DDescX6; +typedef union { BYTE byte[4]; U32 sequence; } HUF_DSeqX6; + +/* recursive, up to level 3; may benefit from