diff --git a/.github/workflows/pull-request.yml b/.github/workflows/pull-request.yml index 32a5f61ba301..886d1b328ba7 100644 --- a/.github/workflows/pull-request.yml +++ b/.github/workflows/pull-request.yml @@ -61,100 +61,3 @@ jobs: run: printf 'WARN_AS_ERROR = FAIL_ON_WARNINGS\n' >> doc/sof.doxygen.in && ninja -C docbuild -v doc - - - # This is a bit redundant with the other jobs below and with the (much - # faster!) installer[.yml] but it may differ in which platforms are - # built. This makes sure platforms without any open-source toolchain - # are added in the right place and do not accidentally break the -a - # option, Docker testing etc. - gcc-build-default-platforms: - runs-on: ubuntu-22.04 - - steps: - - uses: actions/checkout@v4 - with: {fetch-depth: 0, submodules: recursive, filter: 'tree:0'} - - - name: docker - run: docker pull thesofproject/sof && docker tag thesofproject/sof sof - - - name: xtensa-build-all.sh -a - run: ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh -a || - ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh -a -j 1 - - - gcc-build-only: - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - # Use groups to avoid spamming the web interface. Pay attention - # to COMMAS. Don't use a single big group so a single failure - # does not block all other builds. - platform: [rn rmb, - mt8186 mt8195 mt8188, - ] - - steps: - - - uses: actions/checkout@v4 - with: {fetch-depth: 0, submodules: recursive, filter: 'tree:0'} - - - name: docker - run: docker pull thesofproject/sof && docker tag thesofproject/sof sof - - - name: xtensa-build-all.sh platforms - env: - PLATFORM: ${{ matrix.platform }} - run: ./scripts/docker-run.sh - ./scripts/xtensa-build-all.sh -r ${PLATFORM} - - # Warning: there is a fair amount of duplication between 'build-only' - # and 'qemu-boot' because github does not support YAML anchors as of Jan - # 2021. Defining our new actions would be overkill. Another popular - # option is to generate this file from a source with YAML anchors - # before committing it; also deemed overkill for the current amount of - # duplication. - - qemu-boot-test: - runs-on: ubuntu-22.04 - - strategy: - fail-fast: false - matrix: - # Compiler-based groups, see HOST= compilers in - # xtensa-build-all.sh. Pay attention to commas and whitespace. - # The main reason for these groups is to avoid the matrix - # swarming the Github web interface and burying other checks. - # See longer example above. 
- platform: [imx8m, - ] - - steps: - - - uses: actions/checkout@v4 - with: {fetch-depth: 0, submodules: recursive, filter: 'tree:0'} - - - name: turn off HAVE_AGENT - run: echo CONFIG_HAVE_AGENT=n > - src/arch/xtensa/configs/override/no-agent.config - - - name: docker SOF - run: docker pull thesofproject/sof && docker tag thesofproject/sof sof - - - name: xtensa-build-all.sh -o no-agent platforms - env: - PLATFORM: ${{ matrix.platform }} - run: ./scripts/docker-run.sh - ./scripts/xtensa-build-all.sh -o no-agent -r ${PLATFORM} - - - name: docker QEMU - run: docker pull thesofproject/sofqemu && - docker tag thesofproject/sofqemu sofqemu - - - name: qemu-check - env: - PLATFORM: ${{ matrix.platform }} - run: ./scripts/docker-qemu.sh - ../sof.git/scripts/qemu-check.sh ${PLATFORM} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index a788a0800c5e..000000000000 --- a/.travis.yml +++ /dev/null @@ -1,112 +0,0 @@ ---- -# Suggested tools that can save round-trips to github and a lot of time: -# -# yamllint .travis.yml -# ~/.gem/ruby/2.7.0/bin/travis lint .travis.yml -# yaml merge-expand .travis.yml exp.yml && diff -b -u .travis.yml exp.yml - -language: c -# Ubuntu 20.04 LTS -dist: focal - -git: - depth: false - -services: - - docker - -# We don't need the strict sequence of stages; for now we use stages -# only as presentation labels. Nothing in stage "tests" will run if -# anything in buildonly fails; in the future we may want to get rid of -# stages. -stages: - - buildonly - - tests - - -# 'name:'-less jobs appear with their env, e.g.: PLATFORM=tgl -jobs: - include: - - # stage buildonly - - - &build-platform - stage: buildonly - before_install: - &docker-pull-sof - docker pull thesofproject/sof && docker tag thesofproject/sof sof - script: - ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh -r $PLATFORM - env: PLATFORM='jsl' - - - <<: *build-platform - env: PLATFORM='sue' - - - <<: *build-platform - env: PLATFORM='tgl' - - - <<: *build-platform - env: PLATFORM='tgl-h' - - - name: "./scripts/build-tools.sh Release" - before_install: *docker-pull-sof - script: CMAKE_BUILD_TYPE=Release ./scripts/docker-run.sh ./scripts/build-tools.sh - - # stage tests - - - &qemuboottest - stage: tests - script: - - echo CONFIG_HAVE_AGENT=n > src/arch/xtensa/configs/override/no-agent.config - - ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh -o no-agent -r $PLATFORM - - ./scripts/docker-qemu.sh ../sof.git/scripts/qemu-check.sh $PLATFORM - env: PLATFORM='byt cht' - before_install: - - *docker-pull-sof - - docker pull thesofproject/sofqemu && - docker tag thesofproject/sofqemu sofqemu - - - <<: *qemuboottest - env: PLATFORM='bdw hsw' - - - <<: *qemuboottest - env: PLATFORM='apl skl kbl' - - - <<: *qemuboottest - env: PLATFORM='cnl icl' - - - <<: *qemuboottest - env: PLATFORM='imx8 imx8x imx8m' - - - - name: testbench - before_install: *docker-pull-sof - script: - # testbench needs some topologies - - ./scripts/docker-run.sh ./scripts/build-tools.sh -t - - ./scripts/docker-run.sh ./scripts/rebuild-testbench.sh - - ./scripts/host-testbench.sh - - - - name: doxygen - - before_install: sudo apt-get -y install ninja-build doxygen graphviz - - script: - - # Show ALL warnings. Warnings don't cause doxygen to fail (yet). - - mkdir -p doxybuild && pushd doxybuild && cmake -GNinja -S ../doc && - ninja -v doc - - popd - - # Build again (it's very quick) and report a failure in Travis if - # any issue. This time doxygen stops on the first issue. 
- - printf 'WARN_AS_ERROR = YES\n' >> doc/sof.doxygen.in - - ninja -C doxybuild -v doc - -notifications: - irc: - channels: - - "chat.freenode.net#sof" - on_success: always - on_failure: always diff --git a/Kconfig.sof b/Kconfig.sof index 1a4c5dc98654..8a2cf6a1c833 100644 --- a/Kconfig.sof +++ b/Kconfig.sof @@ -150,11 +150,6 @@ config FAST_GET rsource "src/Kconfig" -# See zephyr/modules/Kconfig -if !ZEPHYR_SOF_MODULE - rsource "Kconfig.xtos-build" -endif - if ZEPHYR_SOF_MODULE rsource "Kconfig.zephyr-log" endif @@ -267,8 +262,4 @@ config DSP_RESIDENCY_COUNTERS R0, R1, R2 are abstract states which can be used differently based on platform implementation. -if !ZEPHYR_SOF_MODULE - rsource "Kconfig.xtos-dbg" -endif - endmenu diff --git a/Kconfig.xtos-build b/Kconfig.xtos-build deleted file mode 100644 index b2956a46bbf9..000000000000 --- a/Kconfig.xtos-build +++ /dev/null @@ -1,43 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -comment "XTOS options / compiler" - -choice - prompt "Optimization" - default OPTIMIZE_FOR_PERFORMANCE - help - Controls how compiler should optimize binary. - This config should affect only compiler settings and is - not meant to be used for conditional compilation of code. - -config OPTIMIZE_FOR_PERFORMANCE - bool "Optimize for performance" - help - Apply compiler optimizations prioritizing performance. - It means -O2 for GCC or equivalent for other compilers. - -config OPTIMIZE_FOR_SIZE - bool "Optimize for size" - help - Apply compiler optimizations prioritizing binary size. - It means -Os for GCC or equivalent for other compilers. - -config OPTIMIZE_FOR_DEBUG - bool "Optimize for debug" - help - Apply compiler optimizations prioritizing debugging experience. - It means -Og for GCC or equivalent for other compilers. - -config OPTIMIZE_FOR_NONE - bool "Don't optimize" - help - Apply no compiler optimizations. - It means -O0 for GCC or equivalent for other compilers. - -endchoice - -config BUILD_VM_ROM - bool "Build VM ROM" - default n - help - Select if you want to build VM ROM diff --git a/Kconfig.xtos-dbg b/Kconfig.xtos-dbg deleted file mode 100644 index 6994d2271994..000000000000 --- a/Kconfig.xtos-dbg +++ /dev/null @@ -1,23 +0,0 @@ -config DEBUG_HEAP - bool "Heap debug" - default n - help - Select for enable heap alloc debugging - -config DEBUG_BLOCK_FREE - bool "Blocks freeing debug" - default n - help - It enables checking if free was called multiple times on - already freed block of memory. Enabling this feature increases - number of memory writes and reads, due to checks for memory patterns - that may be performed on allocation and deallocation. - -config DEBUG_FORCE_COHERENT_BUFFER - bool "Force the allocator to allocate coherent buffer only" - default n - help - Select if we want to force the allocator to return coherent/uncached - buffer only. - This should be selected for debug purpose only, as accessing buffer - without caching it will reduce the read/write performance. diff --git a/scripts/docker-run.sh b/scripts/docker-run.sh index 96791d03a6e5..29383275152b 100755 --- a/scripts/docker-run.sh +++ b/scripts/docker-run.sh @@ -5,8 +5,6 @@ # Runs a given script in the docker container you can generate from the # docker_build directory. 
# Example: -# To build sof for tigerlake: -# ./scripts/docker-run.sh ./scripts/xtensa-build-all.sh tgl # To build topology: # ./scripts/docker-run.sh ./scripts/build-tools.sh diff --git a/scripts/set_xtensa_params.sh b/scripts/set_xtensa_params.sh index b4e506d1b52f..92e4d7d384a6 100644 --- a/scripts/set_xtensa_params.sh +++ b/scripts/set_xtensa_params.sh @@ -9,7 +9,6 @@ # These variables are currently used in/by: # -# - xtensa-build-all.sh (XTOS) # - script/rebuild-testbench.sh # - before Zephyr's `twister` or `west build` # diff --git a/scripts/test-repro-build.sh b/scripts/test-repro-build.sh index a6d0f7dad643..3a3cea4660fd 100755 --- a/scripts/test-repro-build.sh +++ b/scripts/test-repro-build.sh @@ -190,4 +190,6 @@ main() printf "\n\n ---- FAIL: differences found between %s/b0/ and b1/ --- \n\n" "$(pwd)" } +test -x "${SOF_TOP}"/scripts/xtensa-build-all.sh || exit 0 + main "$@" diff --git a/scripts/xtensa-build-all.sh b/scripts/xtensa-build-all.sh deleted file mode 100755 index b48ee2c1ff3b..000000000000 --- a/scripts/xtensa-build-all.sh +++ /dev/null @@ -1,318 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2018 Intel Corporation. All rights reserved. - -# stop on most errors -set -e - -# Platforms built and tested by default in CI using the `-a` option. -# They must have a toolchain available in the latest Docker image. -DEFAULT_PLATFORMS=( - imx8m - rn rmb vangogh - mt8186 mt8195 mt8188 -) - -# Work in progress can be added to this "staging area" without breaking -# the -a option for everyone. -SUPPORTED_PLATFORMS=( "${DEFAULT_PLATFORMS[@]}" ) - -# Waiting for container work in progress -SUPPORTED_PLATFORMS+=( mt8365 ) - -# Container work is in progress -SUPPORTED_PLATFORMS+=( acp_6_3 acp_7_0 mt8196) - -BUILD_ROM=no -BUILD_DEBUG=no -BUILD_FORCE_UP=no -BUILD_JOBS=$(nproc --all) -BUILD_VERBOSE= -PLATFORMS=() - -SOF_TOP=$(cd "$(dirname "$0")/.." && pwd) - -# As CMake forks one compiler process for each source file, the XTensa -# compiler spends much more time idle waiting for the license server -# over the network than actually using CPU or disk. A factor 3 has been -# found optimal for 16 nproc 25ms away from the server; your mileage may -# vary. -# -# The entire, purely local gcc build is so fast (~ 1s) that observing -# any difference between -j nproc and -j nproc*N is practically -# impossible so let's not waste RAM when building with gcc. - -if [ -n "$XTENSA_TOOLS_ROOT" ]; then - BUILD_JOBS=$((BUILD_JOBS * 3)) -fi - - -die() -{ - >&2 printf '%s ERROR: ' "$0" - # We want die() to be usable exactly like printf - # shellcheck disable=SC2059 - >&2 printf "$@" - exit 1 -} - -print_usage() -{ - cat <_defconfig file. Implements and saves the manual configuration -described in -https://thesofproject.github.io/latest/developer_guides/firmware/cmake.html - -usage: $0 [options] platform(s) - - -r Build rom if available (gcc only) - -a Build all default platforms fully supported by the latest Docker image and CI - -u Force CONFIG_MULTICORE=n - -d Enable debug build - -c Interactive menuconfig - -o arg, copies src/arch/xtensa/configs/override/.config - to the build directory after invoking CMake and before Make. - -k Configure rimage to use a non-default \${RIMAGE_PRIVATE_KEY} - DEPRECATED: use the more flexible \${PRIVATE_KEY_OPTION} below. - -v Verbose Makefile log - -i Optional IPC_VERSION: can be set to IPC3, IPC4 or an empty string. - If set to "IPCx" then CONFIG_IPC_MAJOR_x will be set. If set to - IPC4 then a platform specific overlay may be used. 
- -j n Set number of make build jobs. Jobs=#cores when no flag. - Infinite when not specified. - -m path to MEU tool. CMake disables rimage signing which produces a - .uns[igned] file signed by MEU. For a non-default key use the - PRIVATE_KEY_OPTION, see below. - -To use a non-default key you must define the right CMake parameter in the -following environment variable: - - PRIVATE_KEY_OPTION='-DMEU_PRIVATE_KEY=path/to/key' $0 -m /path/to/meu ... -or: - PRIVATE_KEY_OPTION='-DRIMAGE_PRIVATE_KEY=path/to/key' $0 ... - -This script supports XtensaTools but only when installed in a specific -directory structure, example: - -myXtensa/ -└── install/ - ├── builds/ - │   ├── RD-2012.5-linux/ - │   │   └── Intel_HiFiEP/ - │   └── RG-2017.8-linux/ - │   ├── LX4_langwell_audio_17_8/ - │   └── X4H3I16w2D48w3a_2017_8/ - └── tools/ - ├── RD-2012.5-linux/ - │   └── XtensaTools/ - └── RG-2017.8-linux/ - └── XtensaTools/ - -$ XTENSA_TOOLS_ROOT=/path/to/myXtensa $0 ... - -Known platforms: ${SUPPORTED_PLATFORMS[*]} - -EOF -} - -# parse the args -while getopts "rudi:j:ckvao:m:" OPTION; do - case "$OPTION" in - r) BUILD_ROM=yes ;; - u) BUILD_FORCE_UP=yes ;; - d) BUILD_DEBUG=yes ;; - i) IPC_VERSION=$OPTARG ;; - j) BUILD_JOBS=$OPTARG ;; - c) MAKE_MENUCONFIG=yes ;; - k) USE_PRIVATE_KEY=yes ;; - o) OVERRIDE_CONFIG=$OPTARG ;; - v) BUILD_VERBOSE='VERBOSE=1' ;; - a) PLATFORMS=("${DEFAULT_PLATFORMS[@]}") ;; - m) MEU_TOOL_PATH=$OPTARG ;; - *) print_usage; exit 1 ;; - esac -done -shift $((OPTIND-1)) - -#default signing tool -SIGNING_TOOL=RIMAGE - -if [ -n "${OVERRIDE_CONFIG}" ] -then - OVERRIDE_CONFIG="${SOF_TOP}/src/arch/xtensa/configs/override/$OVERRIDE_CONFIG.config" - [ -f "${OVERRIDE_CONFIG}" ] || die 'Invalid override config file %s\n' "${OVERRIDE_CONFIG}" -fi - -if [ -n "${MEU_TOOL_PATH}" ] -then - [ -d "${MEU_TOOL_PATH}" ] || die 'Invalid MEU TOOL PATH %s\n' "${MEU_TOOL_PATH}" - MEU_PATH_OPTION=-DMEU_PATH="${MEU_TOOL_PATH}" - SIGNING_TOOL=MEU -fi - -# parse platform args -for arg in "$@"; do - platform=none - for i in "${SUPPORTED_PLATFORMS[@]}"; do - if [ x"$i" = x"$arg" ]; then - PLATFORMS=("${PLATFORMS[@]}" "$i") - platform=$i - shift || true - break - fi - done - if [ "$platform" == "none" ]; then - echo "Error: Unknown platform specified: $arg" - echo "Known platforms are: ${SUPPORTED_PLATFORMS[*]}" - exit 1 - fi -done - -# check target platform(s) have been passed in -if [ ${#PLATFORMS[@]} -eq 0 ]; -then - echo "Error: No platform specified. 
Known platforms: " \ - "${SUPPORTED_PLATFORMS[*]}" - print_usage - exit 1 -fi - -if [ "x$USE_PRIVATE_KEY" == "xyes" ] -then - >&2 printf \ - 'WARNING: -k and RIMAGE_PRIVATE_KEY are deprecated, see usage.\n' - if [ -z ${RIMAGE_PRIVATE_KEY+x} ] - then - echo "Error: No variable specified for RIMAGE_PRIVATE_KEY" - exit 1 - fi - PRIVATE_KEY_OPTION="-DRIMAGE_PRIVATE_KEY=${RIMAGE_PRIVATE_KEY}" -fi - -OLDPATH=$PATH -CURDIR="$(pwd)" - -# build platforms -for platform in "${PLATFORMS[@]}" -do - - printf '\n ------\n %s\n ------\n' "$platform" - - HAVE_ROM='no' - DEFCONFIG_PATCH='' - PLATFORM_PRIVATE_KEY='' - - source "${SOF_TOP}"/scripts/set_xtensa_params.sh "$platform" || - die 'set_xtensa_params.sh failed' - - test -z "${PRIVATE_KEY_OPTION}" || PLATFORM_PRIVATE_KEY="${PRIVATE_KEY_OPTION}" - - if [ -n "$XTENSA_TOOLS_ROOT" ] - then - XTENSA_TOOLS_DIR="$XTENSA_TOOLS_ROOT/install/tools/$TOOLCHAIN_VER" - XTENSA_BUILDS_DIR="$XTENSA_TOOLS_ROOT/install/builds/$TOOLCHAIN_VER" - - [ -d "$XTENSA_TOOLS_DIR" ] || { - >&2 printf 'ERROR: %s\t is not a directory\n' "$XTENSA_TOOLS_DIR" - exit 1 - } - fi - - # CMake uses ROOT_DIR for includes and libraries a bit like - # --sysroot would. - ROOT="$SOF_TOP/../xtensa-root/$HOST" - - if [ -n "$XTENSA_TOOLS_ROOT" ] - then - TOOLCHAIN=xt - ROOT="$XTENSA_BUILDS_DIR/$XTENSA_CORE/xtensa-elf" - # CMake cannot set (evil) build-time environment variables at configure time: -# https://gitlab.kitware.com/cmake/community/-/wikis/FAQ#how-can-i-get-or-set-environment-variables - export XTENSA_SYSTEM=$XTENSA_BUILDS_DIR/$XTENSA_CORE/config - printf 'XTENSA_SYSTEM=%s\n' "${XTENSA_SYSTEM}" - PATH=$XTENSA_TOOLS_DIR/XtensaTools/bin:$OLDPATH - build_dir_suffix='xcc' - else - # Override SOF_CC_BASE from set_xtensa_params.sh - SOF_CC_BASE='gcc' - TOOLCHAIN=$HOST - PATH=$SOF_TOP/../$HOST/bin:$OLDPATH - build_dir_suffix='gcc' - DEFCONFIG_PATCH="" - fi - - BUILD_DIR=build_${platform}_${build_dir_suffix} - printf "Build in %s\n" "$BUILD_DIR" - - # only delete binary related to this build - rm -fr "$BUILD_DIR" - mkdir "$BUILD_DIR" - cd "$BUILD_DIR" - - printf 'PATH=%s\n' "$PATH" - ( set -x # log the main commands and their parameters - cmake -DTOOLCHAIN="$TOOLCHAIN" \ - -DSOF_CC_BASE="$SOF_CC_BASE" \ - -DROOT_DIR="$ROOT" \ - -DMEU_OPENSSL="${MEU_OPENSSL}" \ - "${MEU_PATH_OPTION}" \ - "${PLATFORM_PRIVATE_KEY}" \ - -DINIT_CONFIG=${PLATFORM}${DEFCONFIG_PATCH}_defconfig \ - -DEXTRA_CFLAGS="${EXTRA_CFLAGS}" \ - "$SOF_TOP" - ) - - if [ -n "$OVERRIDE_CONFIG" ] - then - cp "$OVERRIDE_CONFIG" override.config - fi - - if [[ "x$MAKE_MENUCONFIG" == "xyes" ]] - then - cmake --build . -- menuconfig - fi - - case "$IPC_VERSION" in - '') ;; - IPC3) - echo 'CONFIG_IPC_MAJOR_3=y' >> override.config - ;; - IPC4) - test -z "$IPC4_CONFIG_OVERLAY" || - cat "${SOF_TOP}/src/arch/xtensa/configs/override/$IPC4_CONFIG_OVERLAY.config" \ - >> override.config - echo 'CONFIG_IPC_MAJOR_4=y' >> override.config - ;; - *) die "Invalid -i '%s' argument\n" "$IPC_VERSION" ;; - esac - - if [[ "x$BUILD_DEBUG" == "xyes" ]] - then - echo "CONFIG_DEBUG=y" >> override.config - fi - - if [[ "x$BUILD_ROM" == "xyes" && "x$HAVE_ROM" == "xyes" ]] - then - echo "CONFIG_BUILD_VM_ROM=y" >> override.config - fi - - if [ "x$BUILD_FORCE_UP" == "xyes" ] - then - echo "Force building UP(xtensa)..." - echo "CONFIG_MULTICORE=n" >> override.config - fi - - if [ -e override.config ] - then - cmake --build . -- overrideconfig - fi - - cmake --build . -- bin -j "${BUILD_JOBS}" ${BUILD_VERBOSE} - - cd "$CURDIR" -done # for platform in ... 
- -# list all the images -ls -l build_*/*.ri build_*/src/arch/xtensa/rom*.bin || true -ls -l build_*/sof diff --git a/src/arch/xtensa/CMakeLists.txt b/src/arch/xtensa/CMakeLists.txt deleted file mode 100644 index 0b91a132d376..000000000000 --- a/src/arch/xtensa/CMakeLists.txt +++ /dev/null @@ -1,552 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -# platform-specific values - -if(CONFIG_IMX8) - set(platform_folder imx8) -elseif(CONFIG_IMX8X) - set(platform_folder imx8) -elseif(CONFIG_IMX8M) - set(platform_folder imx8m) -elseif(CONFIG_IMX8ULP) - set(platform_folder imx8ulp) -elseif(CONFIG_RENOIR) - set(platform_folder amd/renoir) -elseif(CONFIG_REMBRANDT) - set(platform_folder amd/rembrandt) -elseif(CONFIG_VANGOGH) - set(platform_folder amd/vangogh) -elseif(CONFIG_ACP_6_3) - set(platform_folder amd/acp_6_3) -elseif(CONFIG_ACP_7_0) - set(platform_folder amd/acp_7_0) -elseif(CONFIG_MT8186) - set(platform_folder mt8186) -elseif(CONFIG_MT8188) - set(platform_folder mt8188) -elseif(CONFIG_MT8195) - set(platform_folder mt8195) -elseif(CONFIG_MT8196) - set(platform_folder mt8196) -elseif(CONFIG_MT8365) - set(platform_folder mt8365) -else() - message(FATAL_ERROR "Platform not defined, check your Kconfiguration?") -endif() - -set(fw_name ${CONFIG_RIMAGE_SIGNING_SCHEMA}) - -# File name without directory -get_filename_component(_plf_ld_script ${platform_folder} NAME) -set(platform_ld_script ${_plf_ld_script}.x) -set(platform_rom_ld_script rom.x) - -if(CONFIG_AMD) - target_include_directories(sof_options INTERFACE ${PROJECT_SOURCE_DIR}/src/platform/amd/common/include) -endif() - -# includes - -# None of these should be included if Zephyr strict headers are used. -if (NOT CONFIG_SOF_ZEPHYR_STRICT_HEADERS) -target_include_directories(sof_options INTERFACE - ${PROJECT_SOURCE_DIR}/src/arch/xtensa/include - ${PROJECT_SOURCE_DIR}/src/arch/xtensa/xtos - ${PROJECT_SOURCE_DIR}/xtos/include -) -endif() - -target_include_directories(sof_options INTERFACE ${PROJECT_SOURCE_DIR}/src/platform/${platform_folder}/include) - -if(XCC) - target_include_directories(sof_options INTERFACE ${ROOT_DIR}/arch/include) -else() - target_include_directories(sof_options INTERFACE ${PROJECT_SOURCE_DIR}/src/platform/${platform_folder}/include/arch) -endif() - -target_include_directories(sof_options INTERFACE ${ROOT_DIR}/include) - -if(BUILD_UNIT_TESTS) - set(stdlib_flag "") -else() - set(stdlib_flag "-nostdlib") -endif() - -get_optimization_flag(optimization_flag) - -if(BUILD_CLANG_SCAN) - # pretend to be xtensa compiler to go trough the same paths for AST - target_compile_definitions(sof_options INTERFACE -D__XTENSA__=1) - - if(XCC) - # clang has to compile objects in order to analyze sources, - # so it needs xcc's headers - find_program(XCC_PATH NAMES "xt-xcc" PATHS ENV PATH NO_DEFAULT_PATH) - get_filename_component(XCC_DIR ${XCC_PATH} DIRECTORY) - target_include_directories(sof_options INTERFACE ${XCC_DIR}/../xtensa-elf/include) - endif() - - # Clang by default compiles for host architecture, - # but xtensa is always 32 bit, what may cause mismatch in definitions, - # that depend on bitness, so force compilation for 32 bit. 
- set(XTENSA_C_ASM_FLAGS -m32) - set(XTENSA_C_FLAGS) -else() - set(XTENSA_C_ASM_FLAGS -mlongcalls) - set(XTENSA_C_FLAGS -mtext-section-literals) -endif() - -# linker flags - GCC >= 10.x uses libc -if (CMAKE_COMPILER_IS_GNUCC AND CMAKE_C_COMPILER_VERSION VERSION_GREATER 10.0) - target_link_libraries(sof_options INTERFACE ${stdlib_flag} -lgcc -lc -Wl,--no-check-sections -ucall_user_start -Wl,-static) -else() - target_link_libraries(sof_options INTERFACE ${stdlib_flag} -lgcc -Wl,--no-check-sections -ucall_user_start -Wl,-static) -endif() - -# C & ASM flags -if(CONFIG_COMPILER_INLINE_FUNCTION_OPTION) - target_compile_options(sof_options INTERFACE ${stdlib_flag} ${XTENSA_C_ASM_FLAGS}) -else() - target_compile_options(sof_options INTERFACE ${stdlib_flag} -fno-inline-functions ${XTENSA_C_ASM_FLAGS}) -endif() - -# C flags -# TODO: Generator expressions are supported only with Make and Ninja, -# if we want to support other generators, we would have to find some other way -# for setting flags just for C files. -# Possible solutions: -# 1) CMAKE__FLAGS - works, but is global, we prefer target_* functions -# 2) set_source_files_properties - need to be done for each source file, it's -# better to have set of default flags and change it only for special cases -# 3) custom function that is used instead of target_sources and sets flags -# for each added source based on file extension - -separate_arguments(EXTRA_CFLAGS_AS_LIST NATIVE_COMMAND ${EXTRA_CFLAGS}) - -# No space between -imacros and its argument to avoid CMake -# de-duplication "feature" -target_compile_options(sof_options INTERFACE - $<$: - -${optimization_flag} -g - -Wall -Werror - -Wmissing-prototypes - -Wpointer-arith - ${XTENSA_C_FLAGS} - ${EXTRA_CFLAGS_AS_LIST} - > - -imacros${CONFIG_H_PATH} - ) - -if(BUILD_UNIT_TESTS) - # rest of this file is not needed for unit tests - return() -endif() - -if(XCC) - file(GLOB LINK_DEPS - ${ROOT_DIR}/arch/include/xtensa/config/core-isa*) -else() - file(GLOB LINK_DEPS - ${PROJECT_SOURCE_DIR}/src/platform/${platform_folder}/include/arch/xtensa/config/core-isa*) -endif() - -# linker scripts - -function(sof_add_ld_script binary_name script_name) - - set(lds_in ${PROJECT_SOURCE_DIR}/src/platform/${platform_folder}/${script_name}.in) - set(lds_out ${PROJECT_BINARY_DIR}/${script_name}) - - get_target_property(incdirs sof_options INTERFACE_INCLUDE_DIRECTORIES) - - set(iflags "") - set(glob_predicates "") - foreach(d ${incdirs}) - list(APPEND iflags "-I${d}") - list(APPEND glob_predicates "${d}/*.h") - endforeach() - - get_target_property(incdirs sof_public_headers INTERFACE_INCLUDE_DIRECTORIES) - - foreach(d ${incdirs}) - list(APPEND iflags "-I${d}") - list(APPEND glob_predicates "${d}/*.h") - endforeach() - - file(GLOB lds_headers ${glob_predicates}) - - add_custom_command(OUTPUT ${lds_out} - COMMAND ${CMAKE_C_COMPILER} -E -DLINKER -P ${iflags} -o ${lds_out} -x c ${lds_in} - -imacros${CONFIG_H_PATH} - DEPENDS ${lds_in} ${LINK_DEPS} genconfig ${CONFIG_H_PATH} ${lds_headers} - WORKING_DIRECTORY ${PROJECT_BINARY_DIR} - COMMENT "Generating linker script: ${lds_out}" - VERBATIM - USES_TERMINAL - ) - - add_custom_target("ld_script_${script_name}" DEPENDS ${lds_out}) - add_dependencies(${binary_name} "ld_script_${script_name}") - set_target_properties(${binary_name} PROPERTIES LINK_DEPENDS ${lds_in}) -endfunction() - -sof_add_ld_script(sof ${platform_ld_script}) - -# binaries - -add_library(reset STATIC "") -target_link_libraries(reset sof_options) -target_compile_options(reset PRIVATE -mtext-section-literals) - 
-add_subdirectory(debug) -add_subdirectory(drivers) -add_subdirectory(hal) -add_subdirectory(lib) -add_subdirectory(schedule) -add_subdirectory(xtos) - -add_local_sources(reset xtos/memctl_default.S xtos/reset-vector.S) - -add_local_sources(sof - xtos/crt1-boards.S - xtos/_vectors.S - init.c - exc-dump.S -) - -# TODO: order of these libraries does matter, what is bad, -# we should switch to building with thin archives without symbols index -# and made it before final link so dependencies won't matter -target_link_libraries(sof_static_libraries INTERFACE xtos) -target_link_libraries(sof_static_libraries INTERFACE hal) - -if(CONFIG_XT_INTERRUPT_LEVEL_2) -target_link_libraries(sof_static_libraries INTERFACE xlevel2) -endif() -if(CONFIG_XT_INTERRUPT_LEVEL_3) -target_link_libraries(sof_static_libraries INTERFACE xlevel3) -endif() -if(CONFIG_XT_INTERRUPT_LEVEL_4) -target_link_libraries(sof_static_libraries INTERFACE xlevel4) -endif() -if(CONFIG_XT_INTERRUPT_LEVEL_5) -target_link_libraries(sof_static_libraries INTERFACE xlevel5) -endif() - -target_link_libraries(sof_static_libraries INTERFACE reset) - -target_link_libraries(sof_ld_flags INTERFACE "-Wl,-Map=sof.map") -target_link_libraries(sof_ld_flags INTERFACE "-T${PROJECT_BINARY_DIR}/${platform_ld_script}") - -# Copy the linker output from the top-level to this subdirectory -add_custom_target( - prepare_sof_post_process - # "global" .ELF target used everywhere and declared and produced at the top - DEPENDS sof - COMMAND ${CMAKE_COMMAND} -E copy ${PROJECT_BINARY_DIR}/sof sof-pre -) - -# contains steps that should be performed before fw image is ready for being -# processed by tools like rimage and MEU -add_custom_target(sof_post_process - DEPENDS process_base_module -) - -# contains extra output that should be generated for bin target -add_custom_target(bin_extras) - -# Do nothing / pass-through -add_custom_target(process_base_module - COMMAND ${CMAKE_COMMAND} -E copy sof-pre sof-${fw_name} - DEPENDS prepare_sof_post_process -) - -if(CONFIG_BUILD_VM_ROM) - add_executable(rom "") - target_link_libraries(rom PRIVATE sof_options) - target_link_libraries(rom PRIVATE "-T${PROJECT_BINARY_DIR}/${platform_rom_ld_script}") - sof_add_ld_script(rom ${platform_rom_ld_script}) - - # We have to make additional define, because sources - # are reused for other objects with different flags. 
- target_compile_definitions(rom PRIVATE -DCONFIG_VM_ROM) - - add_local_sources(rom - xtos/crt1-boards-rom.S - xtos/memctl_default.S - xtos/reset-vector.S - ) - - add_custom_target( - rom_dump - COMMAND ${CMAKE_COMMAND} -E copy rom rom-${fw_name} - COMMAND ${CMAKE_OBJCOPY} -O binary rom rom-${fw_name}.bin - COMMAND ${CMAKE_OBJDUMP} -h -D rom > rom-${fw_name}.lmap - COMMAND ${CMAKE_OBJDUMP} -S rom > rom-${fw_name}.lst - COMMAND ${CMAKE_OBJDUMP} -D rom > rom-${fw_name}.dis - DEPENDS rom - VERBATIM - USES_TERMINAL - ) - - add_dependencies(bin_extras rom_dump) -endif() - -if(BUILD_CLANG_SCAN) - # steps below don't compile parts of fw, - # so they are not needed for scan-build - return() -endif() - -add_custom_target( - sof_dump - COMMAND ${CMAKE_OBJDUMP} -S sof-${fw_name} > sof-${fw_name}.lst - COMMAND ${CMAKE_OBJDUMP} -h sof-${fw_name} > sof-${fw_name}.lmap - COMMAND ${CMAKE_OBJDUMP} -D sof-${fw_name} > sof-${fw_name}.dis - DEPENDS sof_post_process - VERBATIM - USES_TERMINAL -) - -include(ExternalProject) - -# smex - -ExternalProject_Add(smex_ep - DEPENDS check_version_h - DOWNLOAD_COMMAND "" - SOURCE_DIR "${PROJECT_SOURCE_DIR}/smex" - PREFIX "${PROJECT_BINARY_DIR}/smex_ep" - BINARY_DIR "${PROJECT_BINARY_DIR}/smex_ep/build" - EXCLUDE_FROM_ALL TRUE - BUILD_ALWAYS 1 - INSTALL_COMMAND "" -) - -add_custom_target( - run_smex - COMMAND ${PROJECT_BINARY_DIR}/smex_ep/build/smex - -l sof-${fw_name}.ldc - sof-${fw_name} - DEPENDS sof_post_process smex_ep - VERBATIM - USES_TERMINAL -) - -# rimage - -ExternalProject_Add(rimage_ep - DEPENDS check_version_h - SOURCE_DIR "${RIMAGE_TOP}" - PREFIX "${PROJECT_BINARY_DIR}/rimage_ep" - BINARY_DIR "${PROJECT_BINARY_DIR}/rimage_ep/build" - EXCLUDE_FROM_ALL TRUE - BUILD_ALWAYS 1 - INSTALL_COMMAND "" -) - -if(NOT DEFINED RIMAGE_PRIVATE_KEY) - set(RIMAGE_PRIVATE_KEY ${PROJECT_SOURCE_DIR}/keys/otc_private_key.pem) -endif() - -if(NOT DEFINED RIMAGE_IMR_TYPE) - # default value for non-production firmware - set(RIMAGE_IMR_TYPE 3) -endif() - -if(NOT MEU_OPENSSL) - set(MEU_OPENSSL "/usr/bin/openssl") -endif() -# Don't ask users to keep secret their openssl location depending on -# what they build in the moment. -set(silenceUnusedWarning "${MEU_OPENSSL}") - -if(MEU_PATH OR DEFINED MEU_NO_SIGN) # Don't sign with rimage - if(NOT DEFINED MEU_OFFSET) - execute_process( - COMMAND ${MEU_PATH}/meu -ver - OUTPUT_VARIABLE MEU_VERSION_FULL_TEXT - OUTPUT_STRIP_TRAILING_WHITESPACE - RESULT_VARIABLE meu_ver_res - ) - if(NOT ${meu_ver_res} EQUAL 0) - message(WARNING "${MEU_PATH}/meu -ver" - " failed with: ${meu_ver_res}") - endif() - - string(REGEX MATCH "Version:[\t\n ]*([^\t\n ]+)" ignored "${MEU_VERSION_FULL_TEXT}") - set(MEU_VERSION ${CMAKE_MATCH_1}) - - if(MEU_VERSION VERSION_LESS 12.0.0.1035) - set(MEU_OFFSET 1152) - elseif(MEU_VERSION VERSION_LESS 15.0.0.0) - set(MEU_OFFSET 1088) - else() - set(MEU_OFFSET 1344) - endif() - endif() - message(STATUS MEU_OFFSET=${MEU_OFFSET}) - - # Passing -s ${MEU_OFFSET} disables rimage signing and produces - # one .uns file and one .met file instead of a .ri file. 
- add_custom_target( - run_rimage - COMMAND ${PROJECT_BINARY_DIR}/rimage_ep/build/rimage - -o sof-${fw_name}.ri - -c "${RIMAGE_TOP}/config/${fw_name}.toml" - -s ${MEU_OFFSET} - -k ${RIMAGE_PRIVATE_KEY} - -i ${RIMAGE_IMR_TYPE} - -f ${SOF_MAJOR}.${SOF_MINOR}.${SOF_MICRO} - -b ${SOF_BUILD} - -e - sof-${fw_name} - DEPENDS sof_post_process rimage_ep - VERBATIM - USES_TERMINAL - ) - - if(NOT DEFINED MEU_FLAGS) - set(MEU_FLAGS - -f ${MEU_PATH}/generic_meu_conf.xml - -mnver 0.0.0.0 - -key ${MEU_PRIVATE_KEY} - -stp ${MEU_OPENSSL} - ${MEU_EXTRA_FLAGS} - ) - endif() - - if(MEU_NO_SIGN) - add_custom_target(run_meu DEPENDS run_rimage) - else() - add_custom_target( - run_meu - COMMAND ${MEU_PATH}/meu -w ./ -s sof-${fw_name} - ${MEU_FLAGS} - -o sof-${fw_name}.ri - DEPENDS run_rimage - VERBATIM - USES_TERMINAL - ) - endif() -else() # sign with rimage - add_custom_target( - run_rimage - COMMAND ${PROJECT_BINARY_DIR}/rimage_ep/build/rimage - -o sof-${fw_name}.ri - -c "${RIMAGE_TOP}/config/${fw_name}.toml" - -k ${RIMAGE_PRIVATE_KEY} - -i ${RIMAGE_IMR_TYPE} - -f ${SOF_MAJOR}.${SOF_MINOR}.${SOF_MICRO} - -b ${SOF_BUILD} - -e - sof-${fw_name} - DEPENDS sof_post_process rimage_ep - VERBATIM - USES_TERMINAL - ) - - add_custom_target(run_meu DEPENDS run_rimage) -endif() # sign with MEU / nothing / rimage - -if(NOT DEFINED FIRMWARE_NAME) - set(fw_output_name "${fw_name}") -else() - set(fw_output_name "${FIRMWARE_NAME}") -endif() - -if(${CMAKE_HOST_WIN32}) - set(GLUE_CMD copy /b sof-${fw_name}.ri.xman + sof-${fw_name}.ri sof-${fw_name}.rix) -else() - set(GLUE_CMD cat sof-${fw_name}.ri.xman sof-${fw_name}.ri > sof-${fw_name}.rix) -endif() - -add_custom_target( - glue_binary_files - COMMAND ${GLUE_CMD} - COMMAND ${CMAKE_COMMAND} -E remove sof-${fw_name}.ri - COMMAND ${CMAKE_COMMAND} -E rename sof-${fw_name}.rix sof-${fw_name}.ri - DEPENDS run_meu - VERBATIM - USES_TERMINAL - BYPRODUCTS sof-${fw_name}.ri # almost like a proper add_custom_command() -) - - -# Keep lists below in sync with rimage/config/*.toml - -# .ri not signed: nothing variable in it to erase. -# NXP -set(UNSIGNED_RI imx8 imx8x imx8m imx8ulp) -# AMD -list(APPEND UNSIGNED_RI rn rmb vangogh acp_6_3 acp_7_0) -# MediaTek -list(APPEND UNSIGNED_RI mt8186 mt8188 mt8195 mt8196 mt8365) - -if(${fw_name} IN_LIST UNSIGNED_RI) # mere copy - add_custom_command(OUTPUT reproducible.ri - COMMENT "Copying sof.ri to reproducible.ri as is" - # TODO: drop the glue_binary_files dependency once the - # edit-in-place and add_custom_target() messes are all fixed - DEPENDS glue_binary_files sof-${fw_name}.ri - COMMAND cmake -E copy sof-${fw_name}.ri reproducible.ri - VERBATIM - ) -else() # strip variables - add_custom_command(OUTPUT reproducible.ri - COMMENT "Creating reproducible.ri" - DEPENDS glue_binary_files sof-${fw_name}.ri - COMMAND ${PROJECT_SOURCE_DIR}/tools/sof_ri_info/sof_ri_info.py - --no_headers --no_modules --no_memory # could use a -q option... - --erase_vars reproducible.ri sof-${fw_name}.ri - VERBATIM - ) -endif() - - -# Top-level 'bin' target collecting all dependencies. Copies final .ri -# file from current subdirectory to top level build directory -# ${PROJECT_BINARY_DIR}. - -# TODO: get rid of ${fw_output_name} and ${fw_name} below. Different -# platforms are already built in different build directories, we will -# never support building multiple platforms in the same build directory. 
-if(MEU_NO_SIGN) - add_custom_target( - bin ALL - # copy rimage output that can be used to sign firmware - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ri.uns ${PROJECT_BINARY_DIR}/sof-${fw_output_name}.ri.uns - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ri.met ${PROJECT_BINARY_DIR}/sof-${fw_output_name}.ri.met - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ri.uns ${PROJECT_BINARY_DIR}/sof.ri.uns - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ri.met ${PROJECT_BINARY_DIR}/sof.ri.met - DEPENDS run_meu bin_extras glue_binary_files sof_dump - VERBATIM - USES_TERMINAL - ) -else() - add_custom_target( - bin ALL - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ri ${PROJECT_BINARY_DIR}/sof-${fw_output_name}.ri - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ri ${PROJECT_BINARY_DIR}/sof.ri - DEPENDS run_meu bin_extras glue_binary_files sof_dump reproducible.ri - VERBATIM - USES_TERMINAL - ) -endif() - -if(CONFIG_TRACE) - add_custom_target(copy_dictionaries - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ldc ${PROJECT_BINARY_DIR}/sof-${fw_output_name}.ldc - COMMAND ${CMAKE_COMMAND} -E copy sof-${fw_name}.ldc ${PROJECT_BINARY_DIR}/sof.ldc - DEPENDS run_smex - VERBATIM - USES_TERMINAL - ) - add_dependencies(bin copy_dictionaries) - endif() - -install( - FILES ${PROJECT_BINARY_DIR}/sof-${fw_output_name}.ri - ${PROJECT_BINARY_DIR}/sof-${fw_output_name}.ldc - DESTINATION bin -) diff --git a/src/arch/xtensa/debug/CMakeLists.txt b/src/arch/xtensa/debug/CMakeLists.txt deleted file mode 100644 index 2148cd3bd201..000000000000 --- a/src/arch/xtensa/debug/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -if (CONFIG_GDB_DEBUG) - add_subdirectory(gdb) -endif() diff --git a/src/arch/xtensa/debug/gdb/CMakeLists.txt b/src/arch/xtensa/debug/gdb/CMakeLists.txt deleted file mode 100644 index 2718ca511758..000000000000 --- a/src/arch/xtensa/debug/gdb/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -add_local_sources(sof init.S debugexception.S utilities.c) diff --git a/src/arch/xtensa/debug/gdb/debugexception.S b/src/arch/xtensa/debug/gdb/debugexception.S deleted file mode 100644 index d129261256e2..000000000000 --- a/src/arch/xtensa/debug/gdb/debugexception.S +++ /dev/null @@ -1,230 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Marcin Rajwa - */ - -/* - * Debug context switch. - * - */ - -#include -#include -#include - -#define DENSITY_BREAK_INS_IDENT 0x40 -#define NEXT_INST_OFFSET 0x03 -#define PS_EXCM_EXCEPTION_MODE 0x10 -#define PS_EXCM_MODE_MASK (~PS_EXCM_EXCEPTION_MODE) -#define DEBUG_GDB_MEM_LOC 0x9E008060 - -/* -Save special register designated by 'reg' into -backup space calculated by offset 'loc * 4' from -memory pointed by a3. -*/ -.macro SAVE_ reg, loc - rsr a1, \reg - s32i a1, a3, \loc * 4 -.endm - -.macro SAVE reg - SAVE_ \reg, \reg -.endm - -/* -Load special register designated by 'reg' from -backup space calculated by offset 'loc * 4' from -memory pointed by a3. -*/ -.macro LOAD_ reg, loc - l32i a1, a3, \loc * 4 - wsr a1, \reg -.endm - -.macro LOAD reg - LOAD_ \reg, \reg -.endm - - - -.text - -/* -Temporary stack for context switch -TODO: move it to dedicated GDB_STACK section -*/ - -gdb_stack: - .fill 0x1000 , 4 , 0 -gdb_stack_end: - -.global DebugExceptionEntry -.align 4 - -/* -Backup important special registers plus -all regular ones (whole register file). 
-Change EXCM field of PS back to normal mode -after an interrupt took place. -*/ -DebugExceptionEntry: - - movi a2, aregs - s32i a0, a2, 0 - s32i a1, a2, 4 - rsr a1, DEBUG_EXCSAVE - s32i a1, a2, 8 - s32i a3, a2, 12 - - movi a3, sregs - SAVE LBEG - SAVE LEND - SAVE LCOUNT - SAVE SAR - SAVE WINDOWBASE - SAVE WINDOWSTART - - rsr a1, DEBUG_PC - l8ui a2, a1, 1 - movi a0, DENSITY_BREAK_INS_IDENT - bne a2, a0, 1f - addi a1, a1, NEXT_INST_OFFSET -1: - s32i a1, a3, DEBUG_PC * 4 - - SAVE EXCSAVE_1 - SAVE_ DEBUG_PS, PS - SAVE EXCCAUSE - SAVE DEBUGCAUSE - SAVE EXCVADDR - - /* - (XCHAL_NUM_AREGS / 4 - 1) - A number which holds information on how many - registers are left to backup. Divide by four since we backup registers - in group of four. Minus one, since one group has already been saved. - */ - movi a1, XCHAL_NUM_AREGS / 4 - 1 - movi a2, aregs -1: - s32i a4, a2, 16 - s32i a5, a2, 20 - s32i a6, a2, 24 - s32i a7, a2, 28 - - addi a6, a2, 16 - addi a5, a1, -1 - rotw 1 - bnez a1, 1b - - movi a1, 1 - wsr a1, windowstart - movi a0, 0 - wsr a0, windowbase - rsync - - /* Setup of stack frame with 20 bytes for extra save area */ - movi a0, 0 - movi a1, gdb_stack + STACK_SIZE - 20 - rsr a2, PS - - /* Set exception mode back to normal */ - movi a3, PS_EXCM_MODE_MASK - and a2, a2, a3 - wsr a2, PS - rsync - - /* reset icountlevel - essential when coming back from single step */ - movi a2, 0x00 - wsr a2, ICOUNTLEVEL - - movi a4, gdb_handle_exception - callx4 a4 - -/* -Restore important special registers plus -all regular ones (whole register file). -Change EXCM field of PS back to exception mode -and return from interrupt. -*/ -DebugExceptionExit: - movi a2, DebugExceptionEntry - wsr a2, DEBUG_EXCSAVE - - rsr a4, PS - movi a3, PS_EXCM_EXCEPTION_MODE - or a4, a4, a3 - wsr a4, PS - rsync - - movi a3, sregs - LOAD LBEG - LOAD LEND - LOAD LCOUNT - LOAD SAR - LOAD WINDOWBASE - rsync - - movi a3, sregs - LOAD WINDOWSTART - LOAD DEBUG_PC - LOAD EXCSAVE_1 - LOAD EXCCAUSE - LOAD EXCVADDR - LOAD INTENABLE - rsync - - movi a6, aregs - movi a5, XCHAL_NUM_AREGS / 4 - 2 -1: - l32i a0, a6, 0 - l32i a1, a6, 4 - l32i a2, a6, 8 - l32i a3, a6, 12 - - beqz a5, 2f - addi a10, a6, 16 - addi a9, a5, -1 - rotw 1 - j 1b -2: - l32i a4, a6, 16 - l32i a5, a6, 20 - l32i a7, a6, 28 - l32i a6, a6, 24 - rotw 2 - - rfi XCHAL_DEBUGLEVEL - - -/* -Put some important interrupt related registers into memory window -pointed by DEBUG_GDB_MEM_LOC -*/ -.global gdb_debug_info -.align 4 - -gdb_debug_info: - - entry a1, 16 - movi a3, DEBUG_GDB_MEM_LOC - l32i a4, a2, 0 //load 4 bytes of message ID from incoming argv - rsr a4, EPC_1 - rsr a5, EPC_2 - rsr a6, EXCCAUSE - rsr a7, DEPC - rsr a8, DEBUG_PS - - s32i a4, a3, 0 - s32i a5, a3, 4 - s32i a6, a3, 8 - s32i a7, a3, 12 - s32i a8, a3, 16 - - isync - rsync - retw - -.size gdb_debug_info, . -gdb_debug_info diff --git a/src/arch/xtensa/debug/gdb/init.S b/src/arch/xtensa/debug/gdb/init.S deleted file mode 100644 index 336816aa41d4..000000000000 --- a/src/arch/xtensa/debug/gdb/init.S +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Marcin Rajwa - */ - -/* - * Init debug exeption and enable global breakpoints. 
- * - */ - -#include - -.text -.global gdb_init_debug_exception -.align 4 - -gdb_init_debug_exception: - entry a1, 16 - - movi a3, DebugExceptionEntry - wsr a3, DEBUG_EXCSAVE - - /* enable breakpoints */ - movi a3, 1 - wsr a3, DEBUG_IBREAKENABLE - - isync - rsync - retw - -.size gdb_init_debug_exception, . - gdb_init_debug_exception diff --git a/src/arch/xtensa/debug/gdb/utilities.c b/src/arch/xtensa/debug/gdb/utilities.c deleted file mode 100644 index 35579d763026..000000000000 --- a/src/arch/xtensa/debug/gdb/utilities.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2019 Intel Corporation. All rights reserved. -// -// Author: Marcin Rajwa - -/* - * Xtensa related functions for GDB. - * - */ -#define GDB_DISABLE_LOWER_INTERRUPTS_MASK ~0x1F - -#include -#include -#include - -void arch_gdb_read_sr(int sr) -{ - int val; - - asm volatile ("movi a3, 1f + 1\n" - "s8i %1, a3, 0\n" - "dhwb a3, 0\n" - "ihi a3, 0\n" - "isync\n" - "1:\n" - "rsr %0, lbeg\n" - : "=r"(val) - : "r"(sr) - : "a3", "memory"); -} - -void arch_gdb_write_sr(int sr, int *sregs) -{ - asm volatile ("movi a3, 1f + 1\n" - "s8i %1, a3, 0\n" - "dhwb a3, 0\n" - "ihi a3, 0\n" - "isync\n" - "1:\n" - "wsr %0, lbeg\n" - : - : "r"(sregs[sr]), "r"(sr) - : "a3", "memory"); -} - -unsigned char arch_gdb_load_from_memory(void *mem) -{ - unsigned long v; - unsigned long addr = (unsigned long)mem; - unsigned char ch; - - asm volatile ("_l32i %0, %1, 0\n" - : "=r"(v) - : "r"(addr & ~3) - : "memory"); - ch = v >> (addr & 3) * 8; - - return ch; -} - -void arch_gdb_memory_load_and_store(void *mem, unsigned char ch) -{ - unsigned long tmp; - unsigned long addr = (unsigned long)mem; - - asm volatile ("_l32i %0, %1, 0\n" - "and %0, %0, %2\n" - "or %0, %0, %3\n" - "_s32i %0, %1, 0\n" - "dhwb %1, 0\n" - "ihi %1, 0\n" - : "=&r"(tmp) - : "r"(addr & ~3), "r"(0xffffffff ^ (0xff << - (addr & 3) * 8)), - "r"(ch << (addr & 3) * 8) - : "memory"); -} - -void arch_gdb_single_step(int *sregs) -{ - /* leave debug just for one instruction */ - sregs[ICOUNT] = 0xfffffffe; - sregs[ICOUNTLEVEL] = XCHAL_DEBUGLEVEL; - /* disable low level interrupts */ - sregs[INTENABLE] &= ~GDB_DISABLE_LOWER_INTERRUPTS_MASK; - arch_gdb_write_sr(ICOUNTLEVEL, sregs); - arch_gdb_write_sr(ICOUNT, sregs); - arch_gdb_write_sr(INTENABLE, sregs); -} diff --git a/src/arch/xtensa/drivers/CMakeLists.txt b/src/arch/xtensa/drivers/CMakeLists.txt deleted file mode 100644 index 09565763093a..000000000000 --- a/src/arch/xtensa/drivers/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -add_local_sources(sof interrupt.c cache_attr.c) - -if(NOT CONFIG_ZEPHYR_SOF_MODULE) - add_local_sources(sof timer.c) -endif() diff --git a/src/arch/xtensa/drivers/interrupt.c b/src/arch/xtensa/drivers/interrupt.c deleted file mode 100644 index f9ca5ba252f1..000000000000 --- a/src/arch/xtensa/drivers/interrupt.c +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2020 Intel Corporation. All rights reserved. -// -// Author: Janusz Jankowski - -#include - -#if CONFIG_WAKEUP_HOOK -void arch_interrupt_on_wakeup(void) -{ - platform_interrupt_on_wakeup(); -} -#endif diff --git a/src/arch/xtensa/drivers/timer.c b/src/arch/xtensa/drivers/timer.c deleted file mode 100644 index 71c93590c293..000000000000 --- a/src/arch/xtensa/drivers/timer.c +++ /dev/null @@ -1,146 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2016 Intel Corporation. All rights reserved. 
-// -// Author: Liam Girdwood - -#include -#include -#include -#include -#include -#include -#include - -void timer_64_handler(void *arg) -{ - struct timer *timer = arg; - uint32_t ccompare; - - if (timer->id >= ARCH_TIMER_COUNT) - return; - - /* get comparator value - will tell us timeout reason */ - ccompare = xthal_get_ccompare(timer->id); - - /* is this a 32 bit rollover ? */ - if (ccompare == 1) { - /* roll over the timer */ - timer->hitime++; - arch_timer_clear(timer); - } else { - /* no roll over, run the handler */ - if (timer->handler) - timer->handler(timer->data); - } - - /* get next timeout value */ - if (timer->hitimeout == timer->hitime) { - /* timeout is in this 32 bit period */ - ccompare = timer->lowtimeout; - } else { - /* timeout is in another 32 bit period */ - ccompare = 1; - } - - xthal_set_ccompare(timer->id, ccompare); -} - -int timer64_register(struct timer *timer, void(*handler)(void *arg), void *arg) -{ - if (timer->id >= ARCH_TIMER_COUNT) - return -EINVAL; - - timer->handler = handler; - timer->data = arg; - timer->hitime = 0; - timer->hitimeout = 0; - - return 0; -} - -uint64_t arch_timer_get_system(struct timer *timer) -{ - uint64_t time = 0; - uint32_t flags; - uint32_t low; - uint32_t high; - uint32_t ccompare; - - if (timer->id >= ARCH_TIMER_COUNT) - goto out; - - ccompare = xthal_get_ccompare(timer->id); - - flags = arch_interrupt_global_disable(); - - /* read low 32 bits */ - low = xthal_get_ccount(); - - /* check and see whether 32bit IRQ is pending for timer */ - if (arch_interrupt_get_status() & (1 << timer->irq) && ccompare == 1) { - /* yes, overflow has occured but handler has not run */ - high = timer->hitime + 1; - } else { - /* no overflow */ - high = timer->hitime; - } - - time = ((uint64_t)high << 32) | low; - - arch_interrupt_global_enable(flags); - -out: - - return time; -} - -int64_t arch_timer_set(struct timer *timer, uint64_t ticks) -{ - uint32_t time = 1; - uint32_t hitimeout = ticks >> 32; - uint32_t flags; - int64_t ret; - - if (timer->id >= ARCH_TIMER_COUNT) { - ret = -EINVAL; - goto out; - } - - /* value of 1 represents rollover */ - if ((ticks & 0xffffffff) == 0x1) - ticks++; - - flags = arch_interrupt_global_disable(); - - /* same hi 64 bit context as ticks ? */ - if (hitimeout < timer->hitime) { - /* cant be in the past */ - arch_interrupt_global_enable(flags); - ret = -EINVAL; - goto out; - } - - /* check whether new timeout requires timer - * rollover. If so, ccompare value should - * be set to 1 in order to increment timer->hitime - * properly in timer_64_handler(). - */ - if (timer->hitime < hitimeout) - time = 1; - else - time = ticks; - - timer->hitimeout = hitimeout; - timer->lowtimeout = ticks; - - xthal_set_ccompare(timer->id, time); - - arch_interrupt_global_enable(flags); - - ret = ticks; - -out: - - return ret; -} diff --git a/src/arch/xtensa/exc-dump.S b/src/arch/xtensa/exc-dump.S deleted file mode 100644 index 060cc9c4a829..000000000000 --- a/src/arch/xtensa/exc-dump.S +++ /dev/null @@ -1,156 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
- */ - -#include -#include -#include - - .section .text, "ax" - .align 64 -literals: - .literal_position - .global arch_dump_regs_a - .type arch_dump_regs_a, @function - -/* - * params: - * a2 - ptr to dump mem buffer - */ -arch_dump_regs_a: - entry a1, 16 - - /* all accessible physical registers */ - - s32i a0, a2, REG_OFFSET_AR_BEGIN - s32i a1, a2, REG_OFFSET_AR_BEGIN + 0x4 - s32i a2, a2, REG_OFFSET_AR_BEGIN + 0x8 - s32i a3, a2, REG_OFFSET_AR_BEGIN + 0xc - s32i a4, a2, REG_OFFSET_AR_BEGIN + 0x10 - s32i a5, a2, REG_OFFSET_AR_BEGIN + 0x14 - s32i a6, a2, REG_OFFSET_AR_BEGIN + 0x18 - s32i a7, a2, REG_OFFSET_AR_BEGIN + 0x1c - memw - - /* store PS */ - rsr a3, PS - s32i a3, a2, REG_OFFSET_PS - - /* - * copy original a2 to a3 as we will increment it in the loop with - * offset to AR registers with addition of 8 registers already read. - * It will be the base for next reg dump - */ - movi a3, REG_OFFSET_AR_BEGIN + 8*4 - add a3, a2, a3 - - /* - * storing rest of AREGS starts here - * a4 - number of 8-reg chunks to save (a0-a7 already done) - */ - movi a4, XCHAL_NUM_AREGS / 8 - 1 - - /* set exception mode if we are on core 0 */ - rsr a6, PRID - bnez a6, mask_interrupts_on_core0 - movi a5, PS_EXCM | PS_INTLEVEL(0x5) - wsr a5, PS - rsync - - /* exception mode set so no need to set interrupt mask */ - j store_register_loop - -mask_interrupts_on_core0: - /* - * if we are in core context different than 0 - * disable interrupts on core 0 - * only level 2 interrupts disabled for now on - */ - /* TODO */ - -store_register_loop: - s32i a8, a3, 0 - s32i a9, a3, 4 - s32i a10, a3, 8 - s32i a11, a3, 0xc - s32i a12, a3, 0x10 - s32i a13, a3, 0x14 - s32i a14, a3, 0x18 - s32i a15, a3, 0x1c - - addi a11, a3, 32 // after rotation a11 will be next a3 - addi a12, a4, -1 // after rotation a12 will be next a4 - iter decrement - /* - * restore registers from current window to preserve backtrace - * upon return - */ - addi a3, a3, -20 - l32i a4, a3, 4 - l32i a3, a3, 0 - rotw 2 - bnez a4, store_register_loop - rotw 2 - memw - -dump_special_registers: - rsr a6, EPC2 - s32i a6, a2, REG_OFFSET_EPC2 - rsr a6, EPC3 - s32i a6, a2, REG_OFFSET_EPC3 - rsr a6, EPC4 - s32i a6, a2, REG_OFFSET_EPC4 -#if XCHAL_INTLEVEL5_MASK - rsr a6, EPC5 - s32i a6, a2, REG_OFFSET_EPC5 -#endif -#if XCHAL_INTLEVEL6_MASK - rsr a6, EPC6 - s32i a6, a2, REG_OFFSET_EPC6 -#endif -#if XCHAL_INTLEVEL7_MASK - rsr a6, EPC7 - s32i a6, a2, REG_OFFSET_EPC7 -#endif - rsr a6, EPS2 - s32i a6, a2, REG_OFFSET_EPS2 - rsr a6, EPS3 - s32i a6, a2, REG_OFFSET_EPS3 - rsr a6, EPS4 - s32i a6, a2, REG_OFFSET_EPS4 -#if XCHAL_INTLEVEL5_MASK - rsr a6, EPS5 - s32i a6, a2, REG_OFFSET_EPS5 -#endif -#if XCHAL_INTLEVEL6_MASK - rsr a6, EPS6 - s32i a6, a2, REG_OFFSET_EPS6 -#endif -#if XCHAL_INTLEVEL7_MASK - rsr a6, EPS7 - s32i a6, a2, REG_OFFSET_EPS7 -#endif - rsr a6, DEPC - s32i a6, a2, REG_OFFSET_DEPC - rsr a6, DEBUGCAUSE - s32i a6, a2, REG_OFFSET_DEBUGCAUSE - rsr a6, EXCCAUSE - s32i a6, a2, REG_OFFSET_EXCCAUSE - rsr a6, INTERRUPT - s32i a6, a2, REG_OFFSET_INTERRUPT - rsr a6, EXCVADDR - s32i a6, a2, REG_OFFSET_EXCVADDR - rsr a6, EXCSAVE1 - s32i a6, a2, REG_OFFSET_EXCSAVE1 - rsr a6, WINDOWBASE - s32i a6, a2, REG_OFFSET_WINDOWBASE - rsr a6, WINDOWSTART - s32i a6, a2, REG_OFFSET_WINDOWSTART - - /* restore processor_state */ -restore_processor_state: - /* restore previously saved PS before return */ - l32i a3, a2, REG_OFFSET_PS - wsr a3, PS - rsync - retw diff --git a/src/arch/xtensa/hal/CMakeLists.txt b/src/arch/xtensa/hal/CMakeLists.txt deleted file mode 100644 index 
605c3f1beb67..000000000000 --- a/src/arch/xtensa/hal/CMakeLists.txt +++ /dev/null @@ -1,143 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -set(STATE_DEFS - -D__SPLIT__extra_size - -D__SPLIT__extra_align - -D__SPLIT__cpregs_size - -D__SPLIT__cpregs_align - -D__SPLIT__cp_names - -D__SPLIT__all_extra_size - -D__SPLIT__all_extra_align - -D__SPLIT__num_coprocessors - -D__SPLIT__cp_num - -D__SPLIT__cp_max - -D__SPLIT__cp_mask - -D__SPLIT__cp_id_mappings - -D__SPLIT__cp_mask_mappings - -D__SPLIT__init_mem_extra - -D__SPLIT__init_mem_cp - -D__SPLIT__save_extra - -D__SPLIT__restore_extra - -D__SPLIT__save_cpregs - -D__SPLIT__save_cp0 - -D__SPLIT__save_cp1 - -D__SPLIT__save_cp2 - -D__SPLIT__save_cp3 - -D__SPLIT__save_cp4 - -D__SPLIT__save_cp5 - -D__SPLIT__save_cp6 - -D__SPLIT__save_cp7 - -D__SPLIT__restore_cpregs - -D__SPLIT__restore_cp0 - -D__SPLIT__restore_cp1 - -D__SPLIT__restore_cp2 - -D__SPLIT__restore_cp3 - -D__SPLIT__restore_cp4 - -D__SPLIT__restore_cp5 - -D__SPLIT__restore_cp6 - -D__SPLIT__restore_cp7 - -D__SPLIT__cpregs_save_fn - -D__SPLIT__cpregs_restore_fn - -D__SPLIT__validate_cp - -D__SPLIT__invalidate_cp - -D__SPLIT__get_cpenable - -D__SPLIT__set_cpenable -) - -set(DISASS_DEFS - -D__SPLIT__op0_format_lengths - -D__SPLIT__byte0_format_lengths - -D__SPLIT__disassemble_size - -D__SPLIT__disassemble -) - -set(MISC_DEFS - -D__SPLIT__clear_regcached_code -) - -# Call0 ABI means the xthal... and xthal..._nw functions are -# identical. If we're building for Call0 ABI, omit the ..._nw -# functions (except for xthal_get_intpending_nw, an interrupt handler -# helper function for which there is no duplicate and which does not -# obey _any_ calling conventions). -set(INTERRUPTS_DEFS - -D__SPLIT__num_intlevels - -D__SPLIT__num_interrupts - -D__SPLIT__excm_level - -D__SPLIT__intlevel - -D__SPLIT__get_intenable - -D__SPLIT__set_intenable - -D__SPLIT__get_interrupt - -D__SPLIT__set_intset - -D__SPLIT__set_intclear -) - -set(CACHE_DEFS - -D__SPLIT__get_cacheattr - -D__SPLIT__get_icacheattr - -D__SPLIT__set_cacheattr - -D__SPLIT__set_icacheattr - -D__SPLIT__set_dcacheattr - -D__SPLIT__set_idcacheattr - -D__SPLIT__idcache_is_enabled - -D__SPLIT__icache_is_enabled - -D__SPLIT__dcache_is_enabled - -D__SPLIT__idcache_is_enabled - -D__SPLIT__icache_all_invalidate - -D__SPLIT__dcache_all_invalidate - -D__SPLIT__dcache_all_writeback - -D__SPLIT__dcache_all_writeback_inv - -D__SPLIT__icache_all_unlock - -D__SPLIT__dcache_all_unlock - -D__SPLIT__icache_region_invalidate - -D__SPLIT__dcache_region_invalidate - -D__SPLIT__dcache_region_writeback - -D__SPLIT__dcache_region_writeback_inv - -D__SPLIT__icache_region_lock - -D__SPLIT__dcache_region_lock - -D__SPLIT__icache_region_unlock - -D__SPLIT__dcache_region_unlock - -D__SPLIT__icache_line_invalidate - -D__SPLIT__dcache_line_invalidate - -D__SPLIT__dcache_line_writeback - -D__SPLIT__dcache_line_writeback_inv - -D__SPLIT__icache_line_lock - -D__SPLIT__dcache_line_lock - -D__SPLIT__icache_line_unlock - -D__SPLIT__dcache_line_unlock - -D__SPLIT__icache_sync - -D__SPLIT__dcache_sync - -D__SPLIT__icache_get_ways - -D__SPLIT__icache_set_ways - -D__SPLIT__dcache_get_ways - -D__SPLIT__dcache_set_ways - -D__SPLIT__cache_coherence_on - -D__SPLIT__cache_coherence_off - -D__SPLIT__set_cache_prefetch_long - -D__SPLIT__set_cache_prefetch - -D__SPLIT__get_cache_prefetch - -D__SPLIT__hw_configid0 - -D__SPLIT__hw_configid1 - -D__SPLIT__release_major - -D__SPLIT__release_minor -) - -add_library(hal STATIC "") -target_link_libraries(hal sof_options) 
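The -D__SPLIT__* groups above exist because the vendored Cadence HAL sources appear to wrap each function or data item in a preprocessor guard so that upstream builds can emit one object per symbol; the target_compile_definitions() call that follows simply enables every split at once for the single hal static library. A rough, illustrative C sketch of that guard pattern (guard and symbol names taken from INTERRUPTS_DEFS above; bodies are placeholders, not the real HAL code):

    #include <xtensa/config/core.h>   /* XCHAL_* core configuration macros */

    #if defined(__SPLIT__num_intlevels)
    /* number of interrupt levels for this core configuration */
    const unsigned char Xthal_num_intlevels = XCHAL_NUM_INTLEVELS;
    #endif

    #if defined(__SPLIT__get_intenable)
    /* read the INTENABLE special register */
    unsigned xthal_get_intenable(void)
    {
            unsigned intenable;
            __asm__ __volatile__("rsr.intenable %0" : "=a"(intenable));
            return intenable;
    }
    #endif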
-target_compile_definitions(hal PRIVATE - ${STATE_DEFS} - ${DISASS_DEFS} - ${MISC_DEFS} - ${INTERRUPTS_DEFS} - ${CACHE_DEFS} -) - -add_local_sources(hal - cache_asm.S - clock.S - int_asm.S - interrupts.c - memcopy.S - windowspill_asm.S - atomics.c -) diff --git a/src/arch/xtensa/hal/atomics.c b/src/arch/xtensa/hal/atomics.c deleted file mode 100644 index d7abc128f966..000000000000 --- a/src/arch/xtensa/hal/atomics.c +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause - -#include -#include - -int _xt_atomic_compare_exchange_4(int *address, int32_t test_value, int32_t set_value); - -int _xt_atomic_compare_exchange_4(int *address, int32_t test_value, int32_t set_value) -{ - return xthal_compare_and_set(address, test_value, set_value); -} diff --git a/src/arch/xtensa/hal/attribute.c b/src/arch/xtensa/hal/attribute.c deleted file mode 100644 index 7d663b2e55fe..000000000000 --- a/src/arch/xtensa/hal/attribute.c +++ /dev/null @@ -1,282 +0,0 @@ -/* attribute.c - Cache attribute (memory access mode) related functions */ - -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/attribute.c#1 $ */ - -/* - * Copyright (c) 2004-2009 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - - - -/* - * Set the "cache attribute" (encoded memory access modes) - * of the region of memory specified by and . - * - * This function is only supported on processor configurations - * with region protection (or XEA1). It has no effect on - * a processor configured with an MMU (with autorefill). - * - * SPECIFYING THE MEMORY REGION - * The full (4 GB) address space may be specified with an - * address of zero and a size of 0xFFFFFFFF (or -1); - * in fact whenever + equal 0xFFFFFFFF, - * is interpreted as one byte greater than that specified. - * - * If the specified memory range exactly covers a series - * of consecutive 512 MB regions, the cache attributes of - * these regions are updated with the requested attribute. - * If this is not the case, e.g. if either or both the - * start and end of the range only partially cover a 512 MB - * region, one of three results are possible: - * - * 1. By default, the cache attribute of all regions - * covered, even just partially, is changed to - * the requested attribute. - * - * 2. If the XTHAL_CAFLAG_EXACT flag is specified, - * a non-zero error code is returned. - * - * 3. 
If the XTHAL_CAFLAG_NO_PARTIAL flag is specified - * (but not the EXACT flag), only regions fully - * covered by the specified range are updated with - * the requested attribute. - * - * WRITEBACK CACHE HANDLING - * This function automatically writes back dirty data when - * switching a region from writeback mode to a non-writeback mode. - * This writeback is done safely, i.e. by first switching to writethrough - * mode, then invoking xthal_dcache_all_writeback(), then switching to - * the selected mode. Such a sequence is necessary to ensure - * there is no longer any dirty data in the memory region by the time - * this function returns, even in the presence of interrupts, speculation, etc. - * This avoids memory coherency problems when switching from writeback - * to bypass mode (in bypass mode, loads go directly to memory, ignoring - * any dirty data in the cache; also, such dirty data can still be cast out - * due to seemingly unrelated stores). - * This automatic write-back can be disabled using the XTHAL_CAFLAG_NO_AUTO_WB flag. - * - * CACHE DISABLE THEN ENABLE HANDLING - * To avoid cache coherency issues when the cache is disabled, then - * memory is modified, then the cache is re-enabled (thus making - * visible stale cache entries), this function automatically - * invalidates the cache when any region switches to bypass mode. - * For efficiency, the entire cache is invalidated -- this is done - * using writeback-invalidate operations to ensure coherency even - * when other regions still have write-back caches enabled. - * This automatic invalidate can be disabled using the XTHAL_CAFLAG_NO_AUTO_INV flag. - * - * Parameters: - * vaddr starting virtual address of region of memory - * - * size number of bytes in region of memory - * (see above, SPECIFYING THE MEMORY REGION) - * - * cattr cache attribute (encoded); - * typically taken from compile-time HAL constants - * XCHAL_CA_{BYPASS[BUF], WRITETHRU, WRITEBACK[_NOALLOC], ILLEGAL} - * (defined in ); - * in XEA1, this corresponds to the value of a nibble - * in the CACHEATTR register; - * in XEA2, this corresponds to the value of the - * cache attribute (CA) field of each TLB entry - * - * On MPU configurations, the cattr is composed of accessRights - * and memoryType. The accessRights occupy bits 0..3 and are - * typically taken from the XTHAL_AR constants. The memory type - * is specified by either a bitwise or-ing of the XTHAL_MEM_... - * constants or, if none of the XTHAL_MEM_... constants are - * specified, bits 4..12 are used for the memory type (that - * allows a cattr obtained by xthal_v2p() to be passed directly). - * - * In addition, on MPU configurations, if the - * XTHAL_MPU_USE_EXISTING_MEMORY_TYPE bit is set then the existing - * memoryType at the first address in the region is used for the - * memoryType of the new region. - * - * Likewise, if the XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS bit is set - * in cattr, then the existing accessRights at the first address - * in the region are used for the accessRights of the new region. - * - * flags bitwise combination of flags XTHAL_CAFLAG_* - * (see xtensa/hal.h for brief description of each flag); - * (see also various descriptions above); - * - * The XTHAL_CAFLAG_EXPAND flag prevents attribute changes - * to regions whose current cache attribute already provides - * greater access than the requested attribute. - * This ensures access to each region can only "expand", - * and thus continue to work correctly in most instances, - * possibly at the expense of performance.
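A hedged usage sketch for the interface documented above (not from the removed file; the wrapper name is illustrative): request the change over exactly one 512 MB region first, and if the range is not exactly region-aligned, fall back to updating only the fully covered regions.

    #include <xtensa/hal.h>

    int make_uncached(void *base)
    {
        /* 0x20000000 bytes == one 512 MB region */
        int err = xthal_set_region_attribute(base, 0x20000000,
                                             XCHAL_CA_BYPASS, XTHAL_CAFLAG_EXACT);
        if (err == -2)   /* range not exactly region-sized/aligned */
            err = xthal_set_region_attribute(base, 0x20000000,
                                             XCHAL_CA_BYPASS,
                                             XTHAL_CAFLAG_NO_PARTIAL);
        return err;
    }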
This helps - * make this flag safer to use in a variety of situations. - * For the purposes of this flag, cache attributes are - * ordered (in "expansion" order, from least to greatest - * access) as follows: - * XCHAL_CA_ILLEGAL no access allowed - * (various special and reserved attributes) - * XCHAL_CA_WRITEBACK writeback cached - * XCHAL_CA_WRITEBACK_NOALLOC writeback no-write-alloc - * XCHAL_CA_WRITETHRU writethrough cached - * XCHAL_CA_BYPASSBUF bypass with write buffering - * XCHAL_CA_BYPASS bypass (uncached) - * This is consistent with requirements of certain - * devices that no caches be used, or in certain cases - * that writethrough caching is allowed but not writeback. - * Thus, bypass mode is assumed to work for most/all types - * of devices and memories (albeit at reduced performance - * compared to cached modes), and is ordered as providing - * greatest access (to most devices). - * Thus, this XTHAL_CAFLAG_EXPAND flag has no effect when - * requesting the XCHAL_CA_BYPASS attribute (one can always - * expand to bypass mode). And at the other extreme, - * no action is ever taken by this function when specifying - * both the XTHAL_CAFLAG_EXPAND flag and the XCHAL_CA_ILLEGAL - * cache attribute. - * - * The XTHAL_CAFLAG_EXPAND is not supported on MPU configurations. - * - * Returns: - * 0 successful, or size is zero - * -1 XTHAL_CAFLAG_NO_PARTIAL flag specified and address range - * is valid with a non-zero size, however no 512 MB region (or page) - * is completely covered by the range - * -2 XTHAL_CAFLAG_EXACT flag specified, and address range does - * not exactly specify a 512 MB region (or page) - * -3 invalid address range specified (wraps around the end of memory) - * -4 function not supported in this processor configuration - */ -int xthal_set_region_attribute( void *vaddr, unsigned size, unsigned cattr, unsigned flags ) -{ -#if XCHAL_HAVE_MPU - if (cattr & 0xffffe000) // check if XTHAL mem flags were supplied - // in this case just pass cattr as the memType paramenter - return xthal_mpu_set_region_attribute(vaddr, size, cattr, cattr, flags); - else - // otherwise we take the bits 0-3 for accessRights and bits 4-13 as the memoryType - return xthal_mpu_set_region_attribute(vaddr, size, cattr & 0xf, (cattr & 0x1ff0) >> 4, flags); -#elif XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY - return -4; /* full MMU not supported */ -#else -/* These cache attribute encodings are valid for XEA1 and region protection only: */ -# if XCHAL_HAVE_PTP_MMU -# define CA_BYPASS XCHAL_CA_BYPASS -# ifdef XCHAL_CA_BYPASSBUF -# define CA_BYPASSBUF XCHAL_CA_BYPASSBUF -# else -# define CA_BYPASSBUF XCHAL_CA_BYPASS -# endif -# define CA_WRITETHRU XCHAL_CA_WRITETHRU -# define CA_WRITEBACK XCHAL_CA_WRITEBACK -# define CA_WRITEBACK_NOALLOC XCHAL_CA_WRITEBACK_NOALLOC -# define CA_ILLEGAL XCHAL_CA_ILLEGAL -# else -/* Hardcode these, because they get remapped when caches or writeback not configured: */ -# define CA_BYPASS 2 -# define CA_BYPASSBUF 6 -# define CA_WRITETHRU 1 -# define CA_WRITEBACK 4 -# define CA_WRITEBACK_NOALLOC 5 -# define CA_ILLEGAL 15 -# endif -# define CA_MASK 0xF /*((1L<> 29); - start_offset = ((unsigned)vaddr & 0x1FFFFFFF); - end_region = (end_vaddr >> 29); - end_offset = ((end_vaddr+1) & 0x1FFFFFFF); - if (flags & XTHAL_CAFLAG_EXACT) { - if (start_offset != 0 || end_offset != 0) - return -2; /* not an exact-sized range */ - } else if (flags & XTHAL_CAFLAG_NO_PARTIAL) { - if (start_offset != 0) - start_region++; - if (end_offset != 0) - end_region--; - if (start_region > end_region) - 
return -1; /* nothing fully covered by specified range */ - } - cacheattr = cachewrtr = xthal_get_cacheattr(); - cattr &= CA_MASK; -# if XCHAL_ICACHE_SIZE == 0 && XCHAL_DCACHE_SIZE == 0 - if (cattr == CA_WRITETHRU || cattr == CA_WRITEBACK || cattr == CA_WRITEBACK_NOALLOC) - cattr = CA_BYPASS; /* no caches configured, only do protection */ -# elif XCHAL_DCACHE_IS_WRITEBACK == 0 - if (cattr == CA_WRITEBACK || cattr == CA_WRITEBACK_NOALLOC) - cattr = CA_WRITETHRU; /* no writeback configured for data cache */ -# endif - for (i = start_region; i <= end_region; i++) { - unsigned sh = (i << 2); /* bit offset of nibble for region i */ - unsigned oldattr = ((cacheattr >> sh) & CA_MASK); - unsigned newattr = cattr; - if (flags & XTHAL_CAFLAG_EXPAND) { - /* This array determines whether a cache attribute can be changed - * from to with the EXPAND flag; an attribute's "pri" - * value (from this array) can only monotonically increase: */ - const static signed char _Xthal_ca_pri[16] = {[CA_ILLEGAL] = -1, - [CA_WRITEBACK] = 3, [CA_WRITEBACK_NOALLOC] = 3, [CA_WRITETHRU] = 4, [CA_BYPASSBUF] = 8, [CA_BYPASS] = 9 }; - if (_Xthal_ca_pri[newattr] < _Xthal_ca_pri[oldattr]) - newattr = oldattr; /* avoid going to lesser access */ - } - if (IS_CACHED(newattr) && !IS_CACHED(oldattr)) - disabled_cache = 1; /* we're disabling the cache for some region */ -# if XCHAL_DCACHE_IS_WRITEBACK - { - unsigned tmpattr = newattr; - if ((oldattr == CA_WRITEBACK || oldattr == CA_WRITEBACK_NOALLOC) - && newattr != CA_WRITEBACK && newattr != CA_WRITEBACK_NOALLOC) /* leaving writeback mode? */ - tmpattr = CA_WRITETHRU; /* leave it safely! */ - cachewrtr = ((cachewrtr & ~(CA_MASK << sh)) | (tmpattr << sh)); - } -# endif - cacheattr = ((cacheattr & ~(CA_MASK << sh)) | (newattr << sh)); - } -# if XCHAL_DCACHE_IS_WRITEBACK - if (cacheattr != cachewrtr /* need to leave writeback safely? */ - && (flags & XTHAL_CAFLAG_NO_AUTO_WB) == 0) { - xthal_set_cacheattr(cachewrtr); /* set to writethru first, to safely writeback any dirty data */ - xthal_dcache_all_writeback(); /* much quicker than scanning entire 512MB region(s) */ - } -# endif - xthal_set_cacheattr(cacheattr); - /* After disabling the cache, invalidate cache entries - * to avoid coherency issues when later re-enabling it: */ - if (disabled_cache && (flags & XTHAL_CAFLAG_NO_AUTO_INV) == 0) { - xthal_dcache_all_writeback_inv(); /* we might touch regions of memory still enabled write-back, - so must use writeback-invalidate, not just invalidate */ - xthal_icache_all_invalidate(); - } - return( 0 ); -#endif /* !(XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY) */ -} - diff --git a/src/arch/xtensa/hal/cache.c b/src/arch/xtensa/hal/cache.c deleted file mode 100644 index 4f6e292e6153..000000000000 --- a/src/arch/xtensa/hal/cache.c +++ /dev/null @@ -1,53 +0,0 @@ -// -// cache.c -- cache management routines -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/cache.c#1 $ - -// Copyright (c) 2002 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -// size of the cache lines in log2(bytes) -const unsigned char Xthal_icache_linewidth = XCHAL_ICACHE_LINEWIDTH; -const unsigned char Xthal_dcache_linewidth = XCHAL_DCACHE_LINEWIDTH; - -// size of the cache lines in bytes -const unsigned short Xthal_icache_linesize = XCHAL_ICACHE_LINESIZE; -const unsigned short Xthal_dcache_linesize = XCHAL_DCACHE_LINESIZE; - -// number of cache sets in log2(lines per way) -const unsigned char Xthal_icache_setwidth = XCHAL_ICACHE_SETWIDTH; -const unsigned char Xthal_dcache_setwidth = XCHAL_DCACHE_SETWIDTH; - -// cache set associativity (number of ways) -const unsigned int Xthal_icache_ways = XCHAL_ICACHE_WAYS; -const unsigned int Xthal_dcache_ways = XCHAL_DCACHE_WAYS; - -// size of the caches in bytes (ways * 2^(linewidth + setwidth)) -const unsigned int Xthal_icache_size = XCHAL_ICACHE_SIZE; -const unsigned int Xthal_dcache_size = XCHAL_DCACHE_SIZE; - -// cache features -const unsigned char Xthal_dcache_is_writeback = XCHAL_DCACHE_IS_WRITEBACK; -const unsigned char Xthal_icache_line_lockable = XCHAL_ICACHE_LINE_LOCKABLE; -const unsigned char Xthal_dcache_line_lockable = XCHAL_DCACHE_LINE_LOCKABLE; - diff --git a/src/arch/xtensa/hal/cache_asm.S b/src/arch/xtensa/hal/cache_asm.S deleted file mode 100644 index 9bb10438936b..000000000000 --- a/src/arch/xtensa/hal/cache_asm.S +++ /dev/null @@ -1,1073 +0,0 @@ -// -// cache_asm.S - assembly language cache management routines -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/cache_asm.S#1 $ - -// Copyright (c) 1999-2015 Cadence Design Systems, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include - - - - -//---------------------------------------------------------------------- -// Huge Range cache routines -//---------------------------------------------------------------------- - - // void xthal_dcache_hugerange_(void *addr, unsigned size); - // - // Invalidate and/or writeback dcache entries for an arbitrary large - // virtual address range with a single scan of the dcache. - // Assumes no address translation, i.e. virtual = physical. - // - // a2 = ptr to range - // a3 = size of range - // - // Note: -128 is a valid immediate for ADDI, but +128 is not, - // and ADDI can relax to ADDMI for multiples of 256. So scanning - // cache backwards (from end to start) allows all cache line sizes - // without creating an extra instruction for the ADDI. - // - .macro dcache_hugefunc name, instruction - .text - .align 4 - .type xthal_dcache_hugerange_\name,@function - .global xthal_dcache_hugerange_\name -xthal_dcache_hugerange_\name: - abi_entry -#if (!defined(XCHAL_HAVE_NX) || XCHAL_HAVE_NX == 0) && XCHAL_DCACHE_SIZE > 0 \ - && XCHAL_HAVE_DCACHE_TEST && XCHAL_HAVE_MINMAX && XCHAL_HAVE_LOOPS - movi a4, XCHAL_DCACHE_SIZE*2 // size at which to use huge algorithm - movi a7, -XCHAL_DCACHE_LINESIZE // for rounding to cache line size - bltu a3, a4, 7f // use normal (line-by-line hit) function -#if XCHAL_HAVE_PREFETCH - movi a11, 0 - xsr.prefctl a11 // temporarily disable prefetch (invalidates prefetch bufs!) -#endif - add a5, a3, a2 // a5 = end of range - and a4, a2, a7 // a4 = low end, rounded to containing cache line - addi a5, a5, /*XCHAL_DCACHE_LINESIZE*/-1 - and a5, a5, a7 // a5 = high end, rounded to containing cache line - movi a7, XCHAL_DCACHE_SIZE/XCHAL_DCACHE_LINESIZE // a7 = number of lines in dcache - movi a3, XCHAL_DCACHE_SIZE-XCHAL_DCACHE_LINESIZE // way index - mov a6, a5 - //movi a8, -XCHAL_DCACHE_SETSIZE // use if LDCT gives non-zero index bits - movi a10, (XCHAL_DCACHE_SIZE/XCHAL_DCACHE_WAYS) - 1 - - loopgtz a7, 1f - ldct a7, a3 // a3 = cache tag for cache entry [a7] - \instruction a2, 0 - .begin schedule - //extui a9, a3, 0, XCHAL_DCACHE_SETWIDTH+XCHAL_DCACHE_LINEWIDTH - and a9, a3, a10 - addi a3, a3, -XCHAL_DCACHE_LINESIZE - .end schedule - .begin schedule - //and a7, a7, a8 // uncomment if LDCT reports non-zero index bits - maxu a6, a6, a4 // a4 = low end of range - minu a2, a6, a5 // a5 = high end of range - or a6, a7, a9 - .end schedule -1: - - \instruction a2, 0 - maxu a6, a6, a4 - minu a2, a6, a5 - \instruction a2, 0 -#if XCHAL_HAVE_PREFETCH - wsr.prefctl a11 // restore prefetch -#endif - isync_return_nop - abi_return -#endif /* dcache supports hugerange */ -// Jump to non-huge routine -7: j.l xthal_dcache_region_\name + ABI_ENTRY_MINSIZE, a4 - .size xthal_dcache_hugerange_\name, . - xthal_dcache_hugerange_\name - .endm - - - - // void xthal_icache_hugerange_(void *addr, unsigned size); - // - // Invalidate icache entries for an arbitrary large - // virtual address range with a single scan of the icache. - // Assumes no address translation, i.e. virtual = physical. - // - // a2 = ptr to range - // a3 = size of range - // - // Note: -128 is a valid immediate for ADDI, but +128 is not, - // and ADDI can relax to ADDMI for multiples of 256. 
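In C terms, the hugerange entry points above amount to a size-based choice between hitting each line of the range and making a single pass over the cache index. A hedged sketch (wrapper name illustrative, prototypes and XCHAL_DCACHE_SIZE assumed to come from the usual Xtensa headers, and the hugerange path assumes virtual == physical as noted above):

    #include <xtensa/hal.h>
    #include <xtensa/config/core-isa.h>   /* assumed location of XCHAL_DCACHE_SIZE */

    void writeback_inv_any_size(void *addr, unsigned size)
    {
        if (size < 2 * XCHAL_DCACHE_SIZE)
            xthal_dcache_region_writeback_inv(addr, size);    /* per-line hits */
        else
            xthal_dcache_hugerange_writeback_inv(addr, size); /* one scan of the dcache */
    }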
So scanning - // cache backwards (from end to start) allows all cache line sizes - // without creating an extra instruction for the ADDI. - // - .macro icache_hugefunc name, instruction - .text - .align 4 - .type xthal_icache_hugerange_\name,@function - .global xthal_icache_hugerange_\name -xthal_icache_hugerange_\name: - abi_entry -#if (!defined(XCHAL_HAVE_NX) || XCHAL_HAVE_NX == 0) &&XCHAL_ICACHE_SIZE > 0 && \ - XCHAL_HAVE_ICACHE_TEST && XCHAL_HAVE_MINMAX && XCHAL_HAVE_LOOPS - movi a4, XCHAL_ICACHE_SIZE*2 // size at which to use huge algorithm - movi a7, -XCHAL_ICACHE_LINESIZE // for rounding to cache line size - bltu a3, a4, 7f // use normal (line-by-line hit) function - add a5, a3, a2 // a5 = end of range - and a4, a2, a7 // a4 = low end, rounded to containing cache line - addi a5, a5, XCHAL_ICACHE_LINESIZE-1 - and a5, a5, a7 // a5 = high end, rounded to containing cache line - movi a7, XCHAL_ICACHE_SIZE/XCHAL_ICACHE_LINESIZE // a7 = number of lines in dcache - movi a3, XCHAL_ICACHE_SIZE-XCHAL_ICACHE_LINESIZE // way index - mov a6, a5 - //movi a8, -XCHAL_ICACHE_SETSIZE // use if LICT gives non-zero index bits - movi a10, (XCHAL_ICACHE_SIZE/XCHAL_ICACHE_WAYS) - 1 - - loopgtz a7, 1f - lict a7, a3 // a3 = cache tag for cache entry [a7] - \instruction a2, 0 - .begin schedule - //extui a9, a3, 0, XCHAL_ICACHE_SETWIDTH+XCHAL_ICACHE_LINEWIDTH - and a9, a3, a10 - addi a3, a3, -XCHAL_ICACHE_LINESIZE - .end schedule - .begin schedule - //and a7, a7, a8 // uncomment if LDCT reports non-zero index bits - maxu a6, a6, a4 // a4 = low end of range - minu a2, a6, a5 // a5 = high end of range - or a6, a7, a9 - .end schedule -1: - - \instruction a2, 0 - maxu a6, a6, a4 - minu a2, a6, a5 - \instruction a2, 0 - isync_return_nop - abi_return -#endif /* icache supports hugerange */ -7: j.l xthal_icache_region_\name + ABI_ENTRY_MINSIZE, a4 - .size xthal_icache_hugerange_\name, . - xthal_icache_hugerange_\name - .endm - - - - - .text - -//---------------------------------------------------------------------- -// Read CACHEATTR register -//---------------------------------------------------------------------- - -#if defined(__SPLIT__get_cacheattr) ||\ - defined(__SPLIT__get_cacheattr_nw) - -// unsigned xthal_get_cacheattr(void); - -DECLFUNC(xthal_get_cacheattr) -DECLFUNC(xthal_get_dcacheattr) -# if XCHAL_HAVE_CACHEATTR /* single CACHEATTR register used for both I and D */ -DECLFUNC(xthal_get_icacheattr) -# endif - abi_entry - dcacheattr_get - abi_return - endfunc - -#endif - -#if defined(__SPLIT__get_icacheattr) ||\ - defined(__SPLIT__get_icacheattr_nw) - -// unsigned xthal_get_icacheattr(void); - -# if !XCHAL_HAVE_CACHEATTR /* possibly independent CACHEATTR states used for I and D */ -DECLFUNC(xthal_get_icacheattr) - abi_entry - icacheattr_get - abi_return - endfunc -# endif - -#endif /*split*/ - - -//---------------------------------------------------------------------- -// Write CACHEATTR register, or equivalent. -//---------------------------------------------------------------------- - -/* - * Set CACHEATTR register in a safe manner. 
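Where CACHEATTR (or its MIMIC/XLT equivalent) exists, it packs one 4-bit attribute per 512 MB region, which is exactly the nibble arithmetic attribute.c used earlier (sh = i << 2). A small, hedged C rendering with illustrative helper names:

    #include <stdint.h>

    /* region index = top three address bits; its nibble sits at bit offset 4*i */
    static unsigned region_of(const void *vaddr)
    {
        return (unsigned)((uintptr_t)vaddr >> 29);
    }

    unsigned get_region_ca(unsigned cacheattr, const void *vaddr)
    {
        return (cacheattr >> (region_of(vaddr) * 4)) & 0xF;
    }

    unsigned put_region_ca(unsigned cacheattr, const void *vaddr, unsigned ca)
    {
        unsigned sh = region_of(vaddr) * 4;
        return (cacheattr & ~(0xFu << sh)) | ((ca & 0xF) << sh);
    }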
- * - * void xthal_set_cacheattr( unsigned new_cacheattr ); - * void xthal_set_icacheattr( unsigned new_cacheattr ); - * void xthal_set_dcacheattr( unsigned new_cacheattr ); - */ - -#if defined(__SPLIT__set_cacheattr) ||\ - defined(__SPLIT__set_cacheattr_nw) - -# if XCHAL_HAVE_CACHEATTR /* single CACHEATTR register used for both I and D accesses */ -DECLFUNC(xthal_set_icacheattr) -DECLFUNC(xthal_set_dcacheattr) -# endif -DECLFUNC(xthal_set_cacheattr) - abi_entry - cacheattr_set - abi_return - endfunc - -#endif /*split*/ - - -#if XCHAL_HAVE_CACHEATTR - - /* - * Already done above. - * - * Since we can't enable/disable the icache and dcache independently, - * and don't have a nice place to store a state which would enable - * us to only enable them both when both have been requested to be - * enabled, we simply enable both for any request to enable either, - * and disable both for any request to disable either cache. - */ - -#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - -# if defined(__SPLIT__set_icacheattr) \ - || defined(__SPLIT__set_icacheattr_nw) - -DECLFUNC(xthal_set_icacheattr) - abi_entry - icacheattr_set - isync_return_nop - abi_return - endfunc - -# endif - -# if defined(__SPLIT__set_dcacheattr) \ - || defined(__SPLIT__set_dcacheattr_nw) - -DECLFUNC(xthal_set_dcacheattr) - abi_entry - dcacheattr_set - abi_return - endfunc - -# endif /*split*/ - -#else /* full MMU (pre-v3): */ - -# if defined(__SPLIT__set_idcacheattr) \ - || defined(__SPLIT__set_idcacheattr_nw) - -// These functions aren't applicable to arbitrary MMU configurations. -// Do nothing in this case. - -DECLFUNC(xthal_set_icacheattr) -DECLFUNC(xthal_set_dcacheattr) - abi_entry - abi_return - endfunc - -# endif /*split*/ - -#endif /* cacheattr/MMU type */ - - -//---------------------------------------------------------------------- -// Determine (guess) whether caches are "enabled" -//---------------------------------------------------------------------- - -/* - * There is no "cache enable" bit in the Xtensa architecture, - * but we can use CACHEATTR (if it or its equivalent exists) - * as an indication that caches have been enabled. - */ - -#if XCHAL_HAVE_CACHEATTR - -# if defined(__SPLIT__idcache_is_enabled) || \ - defined(__SPLIT__idcache_is_enabled_nw) - -DECLFUNC(xthal_icache_is_enabled) -DECLFUNC(xthal_dcache_is_enabled) - abi_entry - cacheattr_is_enabled 2f - movi a2, 0 - abi_return -2: movi a2, 1 - abi_return - endfunc - -# endif /*split*/ - -#elif XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR - -# if defined(__SPLIT__icache_is_enabled) || \ - defined(__SPLIT__icache_is_enabled_nw) - -DECLFUNC(xthal_icache_is_enabled) - abi_entry - icacheattr_is_enabled 2f - movi a2, 0 - abi_return -2: movi a2, 1 - abi_return - endfunc - -# endif - -# if defined(__SPLIT__dcache_is_enabled) || \ - defined(__SPLIT__dcache_is_enabled_nw) - -DECLFUNC(xthal_dcache_is_enabled) - abi_entry - dcacheattr_is_enabled 2f - movi a2, 0 - abi_return -2: movi a2, 1 - abi_return - endfunc - -# endif /*split*/ - -#else - -// These functions aren't applicable to arbitrary MMU configurations. -// Assume caches are enabled in this case (!). 
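As noted above, there is no architectural cache-enable bit, so the ..._is_enabled entry points infer the answer from the attribute state (and full-MMU configurations simply report enabled). A hedged C approximation of that heuristic; the exact attribute values are configuration dependent, so this is illustrative only:

    #include <xtensa/hal.h>
    #include <xtensa/config/core-isa.h>   /* assumed location of the XCHAL_CA_* values */

    int dcache_looks_enabled(void)
    {
        unsigned attrs = xthal_get_dcacheattr();
        unsigned i;

        for (i = 0; i < 8; i++) {
            unsigned ca = (attrs >> (i * 4)) & 0xF;
            if (ca == XCHAL_CA_WRITEBACK || ca == XCHAL_CA_WRITETHRU)
                return 1;   /* at least one region is cached */
        }
        return 0;
    }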
- -# if defined(__SPLIT__idcache_is_enabled) || \ - defined(__SPLIT__idcache_is_enabled_nw) - -DECLFUNC(xthal_icache_is_enabled) -DECLFUNC(xthal_dcache_is_enabled) - abi_entry - movi a2, 1 - abi_return - endfunc -# endif /*split*/ - -#endif - - - -//---------------------------------------------------------------------- -// invalidate the icache -//---------------------------------------------------------------------- - -#if defined(__SPLIT__icache_all_invalidate) || \ - defined(__SPLIT__icache_all_invalidate_nw) - -// void xthal_icache_all_invalidate(void); - -DECLFUNC(xthal_icache_all_invalidate) - abi_entry - icache_invalidate_all a2, a3 - isync_return_nop - abi_return - endfunc - -//---------------------------------------------------------------------- -// invalidate the dcache -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_all_invalidate) || \ - defined(__SPLIT__dcache_all_invalidate_nw) - -// void xthal_dcache_all_invalidate(void); - -DECLFUNC(xthal_dcache_all_invalidate) - abi_entry - dcache_invalidate_all a2, a3 - abi_return - endfunc - -//---------------------------------------------------------------------- -// write dcache dirty data -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_all_writeback) || \ - defined(__SPLIT__dcache_all_writeback_nw) - -// void xthal_dcache_all_writeback(void); - -DECLFUNC(xthal_dcache_all_writeback) - abi_entry - dcache_writeback_all a2, a3, a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// write dcache dirty data and invalidate -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_all_writeback_inv) || \ - defined(__SPLIT__dcache_all_writeback_inv_nw) - -// void xthal_dcache_all_writeback_inv(void); - -DECLFUNC(xthal_dcache_all_writeback_inv) - abi_entry - dcache_writeback_inv_all a2, a3, a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// unlock instructions from icache -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_all_unlock) || \ - defined(__SPLIT__icache_all_unlock_nw) - -// void xthal_icache_all_unlock(void); - -DECLFUNC(xthal_icache_all_unlock) - abi_entry - icache_unlock_all a2, a3 - abi_return - endfunc - -//---------------------------------------------------------------------- -// unlock data from dcache -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_all_unlock) || \ - defined(__SPLIT__dcache_all_unlock_nw) - -// void xthal_dcache_all_unlock(void); - -DECLFUNC(xthal_dcache_all_unlock) - abi_entry - dcache_unlock_all a2, a3 - abi_return - endfunc - -//---------------------------------------------------------------------- -// invalidate the address range in the icache -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_region_invalidate) || \ - defined(__SPLIT__icache_region_invalidate_nw) - -// void xthal_icache_region_invalidate( void *addr, unsigned size ); - -DECLFUNC(xthal_icache_region_invalidate) - abi_entry - icache_invalidate_region a2, a3, a4 - isync_return_nop - abi_return - endfunc - -#endif - -#if defined(__SPLIT__icache_hugerange_invalidate) - -// void xthal_icache_hugerange_invalidate( void *addr, unsigned size ); -icache_hugefunc 
invalidate, ihi - -#endif - -#if defined(__SPLIT__icache_hugerange_unlock) - -# if XCHAL_ICACHE_LINE_LOCKABLE -// void xthal_icache_hugerange_unlock( void *addr, unsigned size ); -icache_hugefunc unlock, ihu -# endif - -#endif - -#if defined(__SPLIT__dcache_hugerange_invalidate) - -// void xthal_dcache_hugerange_invalidate( void *addr, unsigned size ); -dcache_hugefunc invalidate, dhi - -#endif - -#if defined(__SPLIT__dcache_hugerange_unlock) - -# if XCHAL_DCACHE_LINE_LOCKABLE -// void xthal_dcache_hugerange_unlock( void *addr, unsigned size ); -dcache_hugefunc unlock, dhu -# endif - -#endif - -#if defined(__SPLIT__dcache_hugerange_writeback) - -// void xthal_dcache_hugerange_writeback( void *addr, unsigned size ); -dcache_hugefunc writeback, dhwb - -#endif - -#if defined(__SPLIT__dcache_hugerange_writeback_inv) - -// void xthal_dcache_hugerange_writeback_inv( void *addr, unsigned size ); -dcache_hugefunc writeback_inv, dhwbi - - - -//---------------------------------------------------------------------- -// invalidate the address range in the dcache -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_region_invalidate) || \ - defined(__SPLIT__dcache_region_invalidate_nw) - -// void xthal_dcache_region_invalidate( void *addr, unsigned size ); - -DECLFUNC(xthal_dcache_region_invalidate) - abi_entry - dcache_invalidate_region a2, a3, a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// write dcache region dirty data -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_region_writeback) || \ - defined(__SPLIT__dcache_region_writeback_nw) - -// void xthal_dcache_region_writeback( void *addr, unsigned size ); - -DECLFUNC(xthal_dcache_region_writeback) - abi_entry - dcache_writeback_region a2, a3, a4, a5 - abi_return - endfunc - -//---------------------------------------------------------------------- -// write dcache region dirty data and invalidate -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_region_writeback_inv) || \ - defined(__SPLIT__dcache_region_writeback_inv_nw) - -// void xthal_dcache_region_writeback_inv( void *addr, unsigned size ); - -DECLFUNC(xthal_dcache_region_writeback_inv) - abi_entry - dcache_writeback_inv_region a2, a3, a4, a5 - abi_return - endfunc - -//---------------------------------------------------------------------- -// lock instructions in icache region -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_region_lock) || \ - defined(__SPLIT__icache_region_lock_nw) - -// void xthal_icache_region_lock(void); - -DECLFUNC(xthal_icache_region_lock) - abi_entry - icache_lock_region a2, a3, a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// lock data in dcache region -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_region_lock) || \ - defined(__SPLIT__dcache_region_lock_nw) - -// void xthal_dcache_region_lock(void); - -DECLFUNC(xthal_dcache_region_lock) - abi_entry - dcache_lock_region a2, a3, a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// unlock instructions from icache region -//---------------------------------------------------------------------- - -#endif - -#if 
defined(__SPLIT__icache_region_unlock) || \ - defined(__SPLIT__icache_region_unlock_nw) - -// void xthal_icache_region_unlock(void); - -DECLFUNC(xthal_icache_region_unlock) - abi_entry - icache_unlock_region a2, a3, a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// unlock data from dcache region -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_region_unlock) || \ - defined(__SPLIT__dcache_region_unlock_nw) - -// void xthal_dcache_region_unlock(void); - -DECLFUNC(xthal_dcache_region_unlock) - abi_entry - dcache_unlock_region a2, a3, a4 - abi_return - endfunc - - -//---------------------------------------------------------------------- -// invalidate single icache line -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_line_invalidate) || \ - defined(__SPLIT__icache_line_invalidate_nw) - -// void xthal_icache_line_invalidate(void *addr); - -DECLFUNC(xthal_icache_line_invalidate) - abi_entry - icache_invalidate_line a2, 0 - isync_return_nop - abi_return - endfunc - - -//---------------------------------------------------------------------- -// invalidate single dcache line -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_line_invalidate) || \ - defined(__SPLIT__dcache_line_invalidate_nw) - -// void xthal_dcache_line_invalidate(void *addr); - -DECLFUNC(xthal_dcache_line_invalidate) - abi_entry - dcache_invalidate_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// write single dcache line dirty data -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_line_writeback) || \ - defined(__SPLIT__dcache_line_writeback_nw) - -// void xthal_dcache_line_writeback(void *addr); - -DECLFUNC(xthal_dcache_line_writeback) - abi_entry - dcache_writeback_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// write single dcache line dirty data and invalidate -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_line_writeback_inv) || \ - defined(__SPLIT__dcache_line_writeback_inv_nw) - -// void xthal_dcache_line_writeback_inv(void *addr); - -DECLFUNC(xthal_dcache_line_writeback_inv) - abi_entry - dcache_writeback_inv_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// lock instructions in icache line -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_line_lock) || \ - defined(__SPLIT__icache_line_lock_nw) - -// void xthal_icache_line_lock(void); - -DECLFUNC(xthal_icache_line_lock) - abi_entry - icache_lock_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// lock data in dcache line -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_line_lock) || \ - defined(__SPLIT__dcache_line_lock_nw) - -// void xthal_dcache_line_lock(void); - -DECLFUNC(xthal_dcache_line_lock) - abi_entry - dcache_lock_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// unlock instructions from icache line 
-//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_line_unlock) || \ - defined(__SPLIT__icache_line_unlock_nw) - -// void xthal_icache_line_unlock(void); - -DECLFUNC(xthal_icache_line_unlock) - abi_entry - icache_unlock_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// unlock data from dcache line -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_line_unlock) || \ - defined(__SPLIT__dcache_line_unlock_nw) - -// void xthal_dcache_line_unlock(void); - -DECLFUNC(xthal_dcache_line_unlock) - abi_entry - dcache_unlock_line a2, 0 - abi_return - endfunc - -//---------------------------------------------------------------------- -// sync icache and memory (???) -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__icache_sync) || \ - defined(__SPLIT__icache_sync_nw) - -// void xthal_icache_sync(void); - -DECLFUNC(xthal_icache_sync) - abi_entry - icache_sync a2 - isync_return_nop - abi_return - endfunc - -//---------------------------------------------------------------------- -// sync dcache and memory (???) -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__dcache_sync) || \ - defined(__SPLIT__dcache_sync_nw) - -// void xthal_dcache_sync(void); - -DECLFUNC(xthal_dcache_sync) - abi_entry - dcache_sync a2 - abi_return - endfunc - -//---------------------------------------------------------------------- -// Get/Set icache number of ways enabled -//---------------------------------------------------------------------- - -#endif - -#if defined (__SPLIT__icache_get_ways) || \ - defined (__SPLIT__icache_get_ways_nw) - -// unsigned int xthal_icache_get_ways(void); - -DECLFUNC(xthal_icache_get_ways) - abi_entry - icache_get_ways a2 - abi_return - endfunc - -#endif - -#if defined (__SPLIT__icache_set_ways) || \ - defined(__SPLIT__icache_set_ways_nw) - -/// void xthal_icache_set_ways(unsigned int ways); - -DECLFUNC(xthal_icache_set_ways) - abi_entry - icache_set_ways a2 a3 a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// Get/Set dcache number of ways enabled -//---------------------------------------------------------------------- - -#endif - -#if defined (__SPLIT__dcache_get_ways) || \ - defined (__SPLIT__dcache_get_ways_nw) - -// unsigned int xthal_dcache_get_ways(void); - -DECLFUNC(xthal_dcache_get_ways) - abi_entry - dcache_get_ways a2 - abi_return - endfunc - -#endif - -#if defined (__SPLIT__dcache_set_ways) || \ - defined (__SPLIT__dcache_set_ways_nw) - -// void xthal_dcache_set_ways(unsigned int ways); - -DECLFUNC(xthal_dcache_set_ways) - abi_entry - dcache_set_ways a2 a3 a4 - abi_return - endfunc - -//---------------------------------------------------------------------- -// opt into and out of coherence -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__cache_coherence_on) || \ - defined(__SPLIT__cache_coherence_on_nw) - -// The opt-in routine assumes cache was initialized at reset, -// so it's equivalent to the low-level coherence_on routine. 
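A hedged sketch of how the opt-in/opt-out pair is typically used on a coherent configuration; the function name and surrounding steps are illustrative, and opting out goes through the C routine in coherence.c (further down) rather than calling coherence_off directly, as the next comment warns:

    #include <xtensa/hal.h>

    void run_while_coherent(void (*work)(void))
    {
        xthal_cache_coherence_optin();    /* caches initialized at reset: just turn coherence on */
        work();                           /* shared-memory work while coherent */
        xthal_cache_coherence_optout();   /* writes back, invalidates, then drops out safely */
    }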
- -// void xthal_cache_coherence_optin(void) -// void xthal_cache_coherence_on(void) - -DECLFUNC(xthal_cache_coherence_optin) -DECLFUNC(xthal_cache_coherence_on) - abi_entry - cache_coherence_on a2, a3 - abi_return - endfunc - -#endif - -#if defined(__SPLIT__cache_coherence_off) || \ - defined(__SPLIT__cache_coherence_off_nw) - -// The coherence_off routines should not normally be called directly. -// Use the xthal_cache_coherence_optout() C routine instead -// (which first empties the cache). - -// void xthal_cache_coherence_off - -DECLFUNC(xthal_cache_coherence_off) - abi_entry - cache_coherence_off a2, a3 - abi_return - endfunc - - -//---------------------------------------------------------------------- -// Control cache prefetch -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__set_cache_prefetch_long) || \ - defined(__SPLIT__set_cache_prefetch_long_nw) - -# if XCHAL_HAVE_BE -# define aH a2 /* msb word = prefctl mask */ -# define aL a3 /* lsb word = prefctl value */ -# else -# define aH a3 /* msb word = prefctl mask */ -# define aL a2 /* lsb word = prefctl value */ -# endif - -// Set cache prefetch state (-1=enable, 0=disable, and see XTHAL_*PREFETCH_*), -// and return previous one. -// -// int xthal_set_cache_prefetch_long( unsigned long long ); -// -DECLFUNC(xthal_set_cache_prefetch_long) - abi_entry -# if XCHAL_HAVE_PREFETCH - movi a5, XCHAL_CACHE_PREFCTL_DEFAULT - addi a4, aL, 1 // does prefctl value aL == -1 ? - moveqz aL, a5, a4 // if yes (XTHAL_PREFETCH_ENABLE), set it to default - movgez a2, aL, aL // if the high bit is not set, then we want to transfer the contents of aL to prefctl - // so we move it to a2 - bgez aL, 1f // high bit set indicates masked update - ssai 16 // 16-bit right shifts - src a5, aL, aH // get 16-bit-swapped 32-bit value - src a5, a5, a5 // get 32-bit value (rotate by 16) - rsr.prefctl a4 - src a3, aH, aL // get 32-bit mask - or a4, a4, a3 // set masked bits - xor a4, a4, a3 // clear masked bits - and a5, a5, a3 // only use masked bits - or a2, a4, a5 // combine masked bits -1: -# if XCHAL_HW_MIN_VERSION <= XTENSA_HWVERSION_RC_2010_1 /* for erratum #325 */ - j 1f ; .align 8 ; 1: xsr.prefctl a2 ; isync // ensure XSR.PREFCTL;ISYNC wholly within an icache line -# else - xsr.prefctl a2 -# endif -# else - movi a2, 0 -# endif - abi_return - endfunc - -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__set_cache_prefetch) || \ - defined(__SPLIT__set_cache_prefetch_nw) - -// FOR BACKWARD COMPATIBILITY WITH PRE-RF RELEASE OBJECT CODE ONLY. -// Set cache prefetch state (-1=enable, 0=disable, and see the -// definitions of XTHAL_*PREFETCH_* with only the lower 32 bits set), -// and return previous one. -// int xthal_set_cache_prefetch( int ) -// -DECLFUNC(xthal_set_cache_prefetch) - abi_entry -# if XCHAL_HAVE_PREFETCH - movi a3, XCHAL_CACHE_PREFCTL_DEFAULT - addi a4, a2, 1 // does a2 == -1 ? 
- moveqz a2, a3, a4 // if yes (XTHAL_PREFETCH_ENABLE), set it to default - bbci.l a2, 31, 1f // high bit set indicates masked update - rsr.prefctl a4 - extui a5, a2, 16, 15 - or a4, a4, a5 // set masked bits - xor a4, a4, a5 // clear masked bits - and a2, a2, a5 // only use masked bits - or a2, a4, a2 // combine masked bits -1: -# if XCHAL_HW_MIN_VERSION <= XTENSA_HWVERSION_RC_2010_1 /* for erratum #325 */ - j 1f ; .align 8 ; 1: xsr.prefctl a2 ; isync // ensure XSR.PREFCTL;ISYNC wholly within an icache line -# else - xsr.prefctl a2 -# endif -# else - movi a2, 0 -# endif - abi_return - endfunc - -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__get_cache_prefetch) ||\ - defined(__SPLIT__get_cache_prefetch_nw) - -// Return current cache prefetch state. -// int xthal_get_cache_prefetch( void ) -DECLFUNC(xthal_get_cache_prefetch) - abi_entry -# if XCHAL_HAVE_PREFETCH - rsr.prefctl a2 -# else - movi a2, 0 -# endif - abi_return - endfunc - -//---------------------------------------------------------------------- -// Misc configuration info -//---------------------------------------------------------------------- -#endif - -// Eventually these will move to their own file: -#if defined(__SPLIT__hw_configid0) - .set xthals_hw_configid0, XCHAL_HW_CONFIGID0 -#endif - -#if defined(__SPLIT__hw_configid1) - .set xthals_hw_configid1, XCHAL_HW_CONFIGID1 -#endif - -#if defined(__SPLIT__release_major) - .set xthals_release_major, XTHAL_RELEASE_MAJOR -#endif - -#if defined(__SPLIT__release_minor) - .set xthals_release_minor, XTHAL_RELEASE_MINOR - -#endif /*split*/ - - .global xthals_hw_configid0, xthals_hw_configid1 - .global xthals_release_major, xthals_release_minor - -//---------------------------------------------------------------------- - diff --git a/src/arch/xtensa/hal/clock.S b/src/arch/xtensa/hal/clock.S deleted file mode 100644 index 7ea86d3f1b15..000000000000 --- a/src/arch/xtensa/hal/clock.S +++ /dev/null @@ -1,138 +0,0 @@ -// -// clock.S - assembly language clock routines -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/clock.S#1 $ - -// Copyright (c) 2003-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - -// A useful looping macro: -// 'iterate' invokes 'what' (an instruction, pseudo-op or other macro) -// multiple times, passing it a numbered parameter from 'from' to 'to' -// inclusively. Does not invoke 'what' at all if from > to. 
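A hedged usage sketch for the prefetch controls defined above: -1 selects the configuration default, 0 disables prefetching, and the previous setting is returned so it can be restored (the wrapper is illustrative and assumes the returned value does not have the masked-update high bit set):

    #include <xtensa/hal.h>

    void run_without_prefetch(void (*fn)(void))
    {
        int old = xthal_set_cache_prefetch(0);   /* disable, remember previous PREFCTL */
        fn();                                    /* timing-sensitive section */
        xthal_set_cache_prefetch(old);           /* restore the previous setting */
    }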
-// Maximum difference between 'from' and 'to' is 99 minus nesting depth -// (GNU 'as' doesn't allow nesting deeper than 100). -// - .macro iterate from, to, what - .ifeq ((\to-\from) & ~0xFFF) - \what \from - iterate "(\from+1)", \to, \what - .endif - .endm // iterate - - -//---------------------------------------------------------------------- -// Read CCOUNT register -//---------------------------------------------------------------------- - -// unsigned xthal_get_ccount(void) -// - .global xthal_get_ccount - .type xthal_get_ccount,@function - .align 4 -xthal_get_ccount: - abi_entry -#if XCHAL_HAVE_CCOUNT - rsr.ccount a2 -/* - * The following alternative (in absence of CCOUNT) doesn't work well, - * because ICOUNT is often used for debugging. (And when it isn't, - * one would have to ensure that ICOUNTLEVEL is high enough and that - * ICOUNT is incremented to zero in the debug exception handler upon - * ICOUNT exceptions.) - * - * #elif XCHAL_HAVE_DEBUG - * rsr.icount a2 // no CCOUNT, return ICOUNT if available - */ -#else - movi a2, 0 // else no counter at all, just return zero -#endif - abi_return - .size xthal_get_ccount, . - xthal_get_ccount - - -//---------------------------------------------------------------------- -// Access CCOMPAREn registers -//---------------------------------------------------------------------- - -// void xthal_set_ccompare(int, unsigned) -// - .global xthal_set_ccompare - .type xthal_set_ccompare,@function - .align 4 -xthal_set_ccompare: - abi_entry -#if XCHAL_NUM_TIMERS > 0 - bnez a2, 1f - wsr.ccompare0 a3 - rsync - abi_return -#endif -1: -#if XCHAL_NUM_TIMERS > 1 - bnei a2, 1, 2f - wsr.ccompare1 a3 - rsync - abi_return -#endif -2: -#if XCHAL_NUM_TIMERS > 2 - bnei a2, 2, 3f - wsr.ccompare2 a3 - rsync -#endif -3: - abi_return - .size xthal_set_ccompare, . - xthal_set_ccompare - - -// unsigned xthal_get_ccompare(int) -// - .global xthal_get_ccompare - .type xthal_get_ccompare,@function - .align 4 -xthal_get_ccompare: - abi_entry -#if XCHAL_NUM_TIMERS > 0 - bnez a2, 1f - rsr.ccompare0 a2 - abi_return -#endif -1: -#if XCHAL_NUM_TIMERS > 1 - bnei a2, 1, 2f - rsr.ccompare1 a2 - abi_return -#endif -2: -#if XCHAL_NUM_TIMERS > 2 - bnei a2, 2, 3f - rsr.ccompare2 a2 - abi_return -#endif -3: - movi a2, 0 - abi_return - .size xthal_get_ccompare, . - xthal_get_ccompare - diff --git a/src/arch/xtensa/hal/coherence.c b/src/arch/xtensa/hal/coherence.c deleted file mode 100644 index 066557325bcf..000000000000 --- a/src/arch/xtensa/hal/coherence.c +++ /dev/null @@ -1,59 +0,0 @@ -/* coherence.c - Cache coherence opt-in / opt-out functions */ - -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/coherence.c#1 $ */ - -/* - * Copyright (c) 2008 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - - -/* - * Opt-out of cache coherence. - * - * Caveat: on a core with full MMU, cache attribute handling done here only - * works well with the default (reset) TLB mapping of eight 512MB regions. - * It likely won't work correctly when other page sizes are in use (it may - * appear to work but be open to race conditions, depending on situation). - */ -void xthal_cache_coherence_optout( void ) -{ -#if XCHAL_HAVE_EXTERN_REGS && XCHAL_DCACHE_IS_COHERENT - unsigned ca = xthal_get_cacheattr(); - /* Writeback all dirty entries. Writethru mode avoids new dirty entries. */ - xthal_set_region_attribute(0,0xFFFFFFFF, XCHAL_CA_WRITETHRU, XTHAL_CAFLAG_EXPAND); - xthal_dcache_all_writeback(); - /* Invalidate all cache entries. Cache-bypass mode avoids new entries. */ - xthal_set_region_attribute(0,0xFFFFFFFF, XCHAL_CA_BYPASS, XTHAL_CAFLAG_EXPAND); - xthal_dcache_all_writeback_inv(); - /* Wait for everything to settle. */ - asm("memw"); - xthal_dcache_sync(); - xthal_icache_sync(); - /* Opt-out of cache coherency protocol. */ - xthal_cache_coherence_off(); - /* Restore cache attributes, as of entry to this function. */ - xthal_set_cacheattr(ca); -#endif -} - diff --git a/src/arch/xtensa/hal/debug.c b/src/arch/xtensa/hal/debug.c deleted file mode 100644 index 9e2b2654d303..000000000000 --- a/src/arch/xtensa/hal/debug.c +++ /dev/null @@ -1,525 +0,0 @@ -// -// debug.c - debug related constants and functions -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/debug.c#1 $ - -// Copyright (c) 2002 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -#include -#include - - -/* 1 if debug option configured, 0 if not: */ -const int Xthal_debug_configured = XCHAL_HAVE_DEBUG; - -/* Number of instruction and data break registers: */ -const int Xthal_num_ibreak = XCHAL_NUM_IBREAK; -const int Xthal_num_dbreak = XCHAL_NUM_DBREAK; - - -#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE -/* This array is incorrect: */ -const unsigned short Xthal_ill_inst_16[16] = -{ -#if XCHAL_HAVE_BE - 0xfd0f, 0xfd1f, 0xfd2f, 0xfd3f, - 0xfd4f, 0xfd5f, 0xfd6f, 0xfd7f, - 0xfd8f, 0xfd9f, 0xfdaf, 0xfdbf, - 0xfdcf, 0xfddf, 0xfdef, 0xfdff -#else - 0xf0fd, 0xf1fd, 0xf2fd, 0xf3fd, - 0xf4fd, 0xf5fd, 0xf6fd, 0xf7fd, - 0xf8fd, 0xf9fd, 0xfafd, 0xfbfd, - 0xfcfd, 0xfdfd, 0xfefd, 0xfffd -#endif -}; -#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */ - - -#undef XTHAL_24_BIT_BREAK -#undef XTHAL_16_BIT_BREAK -#define XTHAL_24_BIT_BREAK 0x80000000 -#define XTHAL_16_BIT_BREAK 0x40000000 - - - -// set software breakpoint and synchronize cache -unsigned int -xthal_set_soft_break(void *addr) -{ - unsigned inst; - int is24bit = (xthal_disassemble_size( (unsigned char *)addr ) == 3); - unsigned int ret_val; - -#if XCHAL_HAVE_BE - inst = ((((char *)addr)[0])<<24) + - ((((char *)addr)[1])<<16) + - ((((char *)addr)[2])<<8); -#else - inst = ((((char *)addr)[0])) + - ((((char *)addr)[1])<<8) + - ((((char *)addr)[2])<<16); -#endif -#if XCHAL_HAVE_BE - if (is24bit) { - ret_val = XTHAL_24_BIT_BREAK & ((inst>>8)&0xffffff); - ((unsigned char *)addr)[0] = 0x00; - ((unsigned char *)addr)[1] = 0x04; - ((unsigned char *)addr)[2] = 0x00; - } else { - ret_val = XTHAL_16_BIT_BREAK & ((inst>>16)&0xffff); - ((unsigned char *)addr)[0] = 0xD2; - ((unsigned char *)addr)[1] = 0x0f; - } -#else - if (is24bit) { - ret_val = XTHAL_24_BIT_BREAK & (inst&0xffffff); - ((unsigned char *)addr)[0] = 0x00; - ((unsigned char *)addr)[1] = 0x40; - ((unsigned char *)addr)[2] = 0x00; - } else { - ret_val = XTHAL_16_BIT_BREAK & (inst&0xffff); - ((unsigned char *)addr)[0] = 0x2D; - ((unsigned char *)addr)[1] = 0xf0; - } -#endif - *((unsigned int *)addr) = inst; -#if XCHAL_DCACHE_IS_WRITEBACK - xthal_dcache_region_writeback((void*)addr, 3); -#endif -#if XCHAL_ICACHE_SIZE > 0 - xthal_icache_region_invalidate((void*)addr, 3); -#endif - return ret_val; -} - - -// remove software breakpoint and synchronize cache -void -xthal_remove_soft_break(void *addr, unsigned int inst) -{ -#if XCHAL_HAVE_BE - if (inst&XTHAL_24_BIT_BREAK) { - ((unsigned char *)addr)[0] = (inst>>16)&0xff; - ((unsigned char *)addr)[1] = (inst>>8)&0xff; - ((unsigned char *)addr)[2] = inst&0xff; - } else { - ((unsigned char *)addr)[0] = (inst>>8)&0xff; - ((unsigned char *)addr)[1] = inst&0xff; - } -#else - ((unsigned char *)addr)[0] = inst&0xff; - ((unsigned char *)addr)[1] = (inst>>8)&0xff; - if (inst&XTHAL_24_BIT_BREAK) - ((unsigned char *)addr)[2] = (inst>>16)&0xff; -#endif -#if XCHAL_DCACHE_IS_WRITEBACK - xthal_dcache_region_writeback((void*)addr, 3); -#endif -#if XCHAL_ICACHE_SIZE > 0 - xthal_icache_region_invalidate((void*)addr, 3); -#endif -} - - - - -#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE - -// return instruction type -unsigned int -xthal_inst_type(void *addr) -{ - unsigned int inst_type = 0; - unsigned inst; -// unsigned int inst = *((unsigned int *)addr); - unsigned char op0, op1, op2; - unsigned char i, m, n, r, s, t, z; - -#if XCHAL_HAVE_BE - inst = ((((char *)addr)[0])<<24) + - ((((char *)addr)[1])<<16) + - ((((char *)addr)[2])<<8); - op0 = inst>>28; - op1 = (inst>>12)&0xf; - op2 = (inst>>16)&0xf; - i = (inst>>27)&0x1; - z = (inst>>26)&0x1; - m = 
(inst>>24)&0x3; - n = (inst>>26)&0x3; - r = (inst>>16)&0xf; - s = (inst>>20)&0xf; - t = (inst>>24)&0xf; -#else - inst = ((((char *)addr)[0])) + - ((((char *)addr)[1])<<8) + - ((((char *)addr)[2])<<16); - op0 = inst&0xf; - op1 = (inst&0xf0000)>>16; - op2 = (inst&0xf00000)>>20; - i = (inst&0x80)>>7; - z = (inst&0x40)>>6; - m = (inst&0xc0)>>6; - n = (inst&0x30)>>4; - r = (inst&0xf000)>>12; - s = (inst&0xf00)>>8; - t = (inst&0xf0)>4; -#endif - switch (op0) { - case 0x0: - inst_type |= XTHAL_24_BIT_INST; - if ((op1==0)&&(op2==0)) - switch (r) { - case 0: - if (m==0x2) { - if (!(n&0x2)) // RET, RETW - inst_type |= XTHAL_RET_INST; - else if (n==0x2) // JX - inst_type |= (XTHAL_JUMP_INST|XTHAL_DEST_REG_INST); - inst_type |= (s<<28); - } else if (m==3) // CALLX - inst_type |= (XTHAL_JUMP_INST|XTHAL_DEST_REG_INST); - inst_type |= (s<<28); - break; - case 0x3: - if (t==0) - switch (s) { - case 0x0: // RFE - inst_type |= XTHAL_RFE_INST; - break; - case 0x1: // RFUE - inst_type |= XTHAL_RFUE_INST; - break; - case 0x4: // RFW - case 0x5: - inst_type |= XTHAL_RFW_INST; - break; - } - else if (t==1) // RFI - inst_type |= XTHAL_RFI_INST; - break; - case 0x4: // BREAK - inst_type |= XTHAL_BREAK_INST; - break; - case 0x5: // SYSCALL - inst_type |= XTHAL_SYSCALL_INST; - break; - } - break; - case 0x5: // CALL - inst_type |= XTHAL_24_BIT_INST; - inst_type |= (XTHAL_JUMP_INST|XTHAL_DEST_REL_INST); - break; - case 0x6: // B - inst_type |= XTHAL_24_BIT_INST; - if (n==0) // J - inst_type |= (XTHAL_JUMP_INST|XTHAL_DEST_REL_INST); - else if ((n==0x1)||(n==0x2)) - inst_type |= (XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST); - else { - if (m&0x2) - inst_type |= (XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST); - else if ((m==0x1)&&((r==0x0)||(r==0x1))) - inst_type |= (XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST); - } - break; - case 0x7: // B - inst_type |= XTHAL_24_BIT_INST; - inst_type |= (XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST); - break; -#if XCHAL_HAVE_DENSITY - case 0x8: // L32I.N - case 0x9: // S32I.N - case 0xA: // ADD.N - case 0xb: // ADDI.N - inst_type |= XTHAL_16_BIT_INST; - break; - case 0xc: - inst_type |= XTHAL_16_BIT_INST; // MOVI.N BEQZ.N, BNEZ.N - if (i) - inst_type |= (XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST); - break; - case 0xd: // MOV.N NOP.N - inst_type |= XTHAL_16_BIT_INST; - if (r==0xf) - switch(t) { - case 0x0: - case 0x1: - inst_type |= XTHAL_RET_INST; // RET.N, RETW.N - break; - case 0x2: - inst_type |= XTHAL_BREAK_INST; // BREAK.N - break; - } - break; -#endif /* XCHAL_HAVE_DENSITY */ - default: - inst_type |= XTHAL_24_BIT_INST; - } - return inst_type; -} - -// returns branch address -unsigned int -xthal_branch_addr(void *addr) -{ - unsigned int b_addr = (unsigned int) addr; - unsigned inst; -// unsigned int inst = *((unsigned int *)addr); - int offset; - unsigned int inst_type = xthal_inst_type(addr); - unsigned int inst_type_mask; -#if XCHAL_HAVE_BE - inst = ((((char *)addr)[0])<<24) + - ((((char *)addr)[1])<<16) + - ((((char *)addr)[2])<<8); -#else - inst = ((((char *)addr)[0])) + - ((((char *)addr)[1])<<8) + - ((((char *)addr)[2])<<16); -#endif -#if XCHAL_HAVE_DENSITY - inst_type_mask = XTHAL_16_BIT_INST|XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST; - if ((inst_type&inst_type_mask)==inst_type_mask) { -# if XCHAL_HAVE_BE - b_addr += (4+((inst&0x3000000)>>20)+((inst&0xf0000)>>16)); -# else - b_addr += (4+(inst&0x30)+((inst&0xf000)>>12)); -# endif - } -#endif /* XCHAL_HAVE_DENSITY */ - inst_type_mask = XTHAL_24_BIT_INST|XTHAL_BRANCH_INST|XTHAL_DEST_REL_INST; - if ((inst_type&inst_type_mask)==inst_type_mask) { -#if 
XCHAL_HAVE_BE - if ((inst&0xf0000000)==0x70000000) - offset = ((int)(inst<<16))>>24; - else if ((inst&0xf2000000)==0x62000000) - offset = ((int)(inst<<16))>>24; - else - offset = ((int)(inst<<12))>>20; -#else - if ((inst&0xf)==0x7) - offset = ((int)(inst<<8))>>24; - else if ((inst&0x2f)==0x26) - offset = ((int)(inst<<8))>>24; - else - offset = ((int)(inst<<8))>>20; -#endif - b_addr += 4 + offset; - } - inst_type_mask = XTHAL_24_BIT_INST|XTHAL_JUMP_INST|XTHAL_DEST_REL_INST; - if ((inst_type&inst_type_mask)==inst_type_mask) { -#if XCHAL_HAVE_BE - if ((inst&0xfc000000)==0x60000000) - offset = ((int)(inst<<6))>>14; - else - { - b_addr &= 0xfffffffc; - offset = ((int)(inst<<6))>>12; - } -#else - if ((inst&0x3f)==0x6) - offset = ((int)(inst<<8))>>14; - else - { - b_addr &= 0xfffffffc; - offset = ((int)(inst<<8))>>12; - } -#endif - b_addr += 4 + offset; - } - return b_addr; -} - -// return pc of next instruction for a given state -unsigned int xthal_get_npc(XTHAL_STATE *user_state) -{ - unsigned inst_type; - unsigned npc; - inst_type = xthal_inst_type((void *)user_state->pc); - if (inst_type & XTHAL_24_BIT_INST) - npc = user_state->pc + 3; - else - npc = user_state->pc + 2; - if (inst_type & XTHAL_RFW_INST) { - /* Can not debug level 1 interrupts */ - // xt_panic(); - } else if (inst_type & XTHAL_RFUE_INST) { - /* Can not debug level 1 interrupts */ - // xt_panic(); - } else if (inst_type & XTHAL_RFI_INST) { - /* Can not debug level 1 interrupts */ - // xt_panic(); - } else if (inst_type & XTHAL_RFE_INST) { - /* Can not debug level 1 interrupts */ - // xt_panic(); - } else if (inst_type & XTHAL_RET_INST) { - npc = (user_state->pc&0xc0000000)+(user_state->ar[0]&0x3fffffff); - } else if (inst_type & XTHAL_BREAK_INST) { - /* Can not debug break */ - // xt_panic(); - } else if (inst_type & XTHAL_SYSCALL_INST) { - /* Can not debug exceptions */ - // xt_panic(); - } else if (inst_type & XTHAL_LOOP_END) { - // xt_panic(); - } else if (inst_type & XTHAL_JUMP_INST) { - if (inst_type & XTHAL_DEST_REG_INST) { - return user_state->ar[inst_type>>28]; - } else if (inst_type & XTHAL_DEST_REL_INST) { - return xthal_branch_addr((void *)user_state->pc); - } - } else if (inst_type & XTHAL_BRANCH_INST) { - int branch_taken = 0; - unsigned short inst; - unsigned char op0, t, s, r, m, n; - memcpy(&inst, (void *)user_state->pc, 2); -#if XCHAL_HAVE_BE - op0 = (inst&0xf000)>>12; - t = (inst&0x0f00)>>8; - s = (inst&0x00f0)>>4; - r = (inst&0x000f); - m = t&3; - n = t>>2; -#else - op0 = (inst&0x000f); - t = (inst&0x00f0)>>4; - s = (inst&0x0f00)>>8; - r = (inst&0xf000)>>12; - m = t>>2; - n = t&3; -#endif - if (inst_type &XTHAL_16_BIT_INST) { -#if XCHAL_HAVE_BE - if (inst&0x400) /* BNEZ.N */ - branch_taken = (user_state->ar[(inst>>4)&0xf]!=0); - else /* BEQZ.N */ - branch_taken = (user_state->ar[(inst>>4)&0xf]==0); -#else - if (inst&0x40) /* BNEZ.N */ - branch_taken = (user_state->ar[(inst>>8)&0xf]!=0); - else /* BEQZ.N */ - branch_taken = (user_state->ar[(inst>>8)&0xf]==0); -#endif - } - if (op0==0x6) { - if (n==1) { - if (m==0) { /* BEQZ */ - branch_taken = (user_state->ar[s]==0); - } else if (m==1) { /* BNEZ */ - branch_taken = (user_state->ar[s]!=0); - } else if (m==2) { /* BLTZ */ - branch_taken = (((int)user_state->ar[s])<0); - } else if (m==3) { /* BGEZ */ - branch_taken = (((int)user_state->ar[s])>=0); - } - } else if (n==2) { - int b4const[16] = - { -1, 1, 2, 3, 4, 5, 6, 7, - 8, 10, 12, 16, 32, 62, 128, 256 }; - if (m==0) { /* BEQI */ - branch_taken = (user_state->ar[s]==b4const[r]); - } else if (m==1) { /* BNEI */ - 
branch_taken = (user_state->ar[s]!=b4const[r]);
- } else if (m==2) { /* BLTI */
- branch_taken = (((int)user_state->ar[s])<b4const[r]);
- } else if (m==3) { /* BGEI */
- branch_taken = (((int)user_state->ar[s])>=b4const[r]);
- }
- } else if (n==3) {
- int b4constu[16] =
- { 32768, 65536, 2, 3, 4, 5, 6, 7,
- 8, 10, 12, 16, 32, 62, 128, 256 };
- if (m==2) { /* BLTUI */
- branch_taken = (user_state->ar[s]<b4constu[r]);
- } else if (m==3) { /* BGEUI */
- branch_taken = (user_state->ar[s]>=b4constu[r]);
- }
- }
- } else if (op0==0x7) {
- if (r==0) { /* BNONE */
- branch_taken = ((user_state->ar[s]&user_state->ar[t])==0);
- } else if (r==1) { /* BEQ */
- branch_taken = (user_state->ar[s]==user_state->ar[t]);
- } else if (r==2) { /* BLT */
- branch_taken = ((int)user_state->ar[s]<(int)user_state->ar[t]);
- } else if (r==3) { /* BLTU */
- branch_taken = (user_state->ar[s]<user_state->ar[t]);
- } else if (r==4) { /* BALL */
- branch_taken = (((~user_state->ar[s])&user_state->ar[t])==0);
- } else if (r==5) { /* BBC */
-#if XCHAL_HAVE_BE
- branch_taken = ((user_state->ar[s]&(0x80000000>>user_state->ar[t]))==0);
- } else if (r==6) { /* BBCI */
- branch_taken = ((user_state->ar[s]&(0x80000000>>t))==0);
- } else if (r==7) { /* BBCI */
- branch_taken = ((user_state->ar[s]&(0x80000000>>(t+16)))==0);
-#else
- branch_taken = ((user_state->ar[s]&(1<<user_state->ar[t]))==0);
- } else if (r==6) { /* BBCI */
- branch_taken = ((user_state->ar[s]&(1<<t))==0);
- } else if (r==7) { /* BBCI */
- branch_taken = ((user_state->ar[s]&(1<<(t+16)))==0);
-#endif
- } else if (r==8) { /* BANY */
- branch_taken = ((user_state->ar[s]&user_state->ar[t])!=0);
- } else if (r==9) { /* BNE */
- branch_taken = (user_state->ar[s]!=user_state->ar[t]);
- } else if (r==10) { /* BGE */
- branch_taken = ((int)user_state->ar[s]>=(int)user_state->ar[t]);
- } else if (r==11) { /* BGEU */
- branch_taken = (user_state->ar[s]>=user_state->ar[t]);
- } else if (r==12) { /* BNALL */
- branch_taken = (((~user_state->ar[s])&user_state->ar[t])!=0);
- } else if (r==13) { /* BBS */
-#if XCHAL_HAVE_BE
- branch_taken = ((user_state->ar[s]&(0x80000000>>user_state->ar[t]))!=0);
- } else if (r==14) { /* BBSI */
- branch_taken = ((user_state->ar[s]&(0x80000000>>t))!=0);
- } else if (r==15) { /* BBSI */
- branch_taken = ((user_state->ar[s]&(0x80000000>>(t+16)))!=0);
-#else
- branch_taken = ((user_state->ar[s]&(1<<user_state->ar[t]))!=0);
- } else if (r==14) { /* BBSI */
- branch_taken = ((user_state->ar[s]&(1<<t))!=0);
- } else if (r==15) { /* BBSI */
- branch_taken = ((user_state->ar[s]&(1<<(t+16)))!=0);
-#endif
- }
- }
- if (branch_taken) {
- if (inst_type & XTHAL_DEST_REG_INST) {
- return user_state->ar[inst_type>>24];
- } else if (inst_type & XTHAL_DEST_REL_INST) {
- return xthal_branch_addr((void *)user_state->pc);
- }
- }
-#if XCHAL_HAVE_LOOPS
- else if (user_state->lcount && (npc==user_state->lend))
- return user_state->lbeg;
-#endif
- }
- return npc;
-}
-
-#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */
-
diff --git a/src/arch/xtensa/hal/debug_hndlr.S b/src/arch/xtensa/hal/debug_hndlr.S
deleted file mode 100644
index 172f61c03966..000000000000
--- a/src/arch/xtensa/hal/debug_hndlr.S
+++ /dev/null
@@ -1,146 +0,0 @@
-//
-// debug_hndlr.S -- default Xtensa debug exception handler
-//
-// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/debug_hndlr.S#1 $
-
-// Copyright (c) 2003-2010 Tensilica Inc.
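The deprecated helpers removed above (xthal_inst_type(), xthal_branch_addr(), xthal_get_npc(), together with xthal_set_soft_break() and xthal_remove_soft_break()) were aimed at a simple debug monitor that single-steps by planting a temporary breakpoint at the next PC. A minimal sketch of that usage follows; it assumes a monitor that owns the debug-exception flow and writable code memory, that <xtensa/hal.h> carries the declarations, and the names monitor_single_step()/monitor_step_complete() are hypothetical.

    /* Sketch only: these helpers are compiled only when
     * INCLUDE_DEPRECATED_HAL_DEBUG_CODE is defined. */
    #include <xtensa/hal.h>      /* assumed home of the xthal_* declarations */

    static void *step_addr;      /* where the temporary break was planted */
    static unsigned saved_inst;  /* encoded original instruction, from the HAL */

    void monitor_single_step(XTHAL_STATE *state)
    {
        step_addr  = (void *)xthal_get_npc(state);     /* next instruction to run */
        saved_inst = xthal_set_soft_break(step_addr);  /* plant BREAK / BREAK.N */
        /* ... resume; the core takes a debug exception at step_addr ... */
    }

    void monitor_step_complete(void)
    {
        xthal_remove_soft_break(step_addr, saved_inst); /* restore original bytes */
    }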
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include - -#if XCHAL_HAVE_DEBUG && XCHAL_HAVE_EXCEPTIONS - - /* - * Default debug exception handler. - * - * Note that the debug exception vector must save a3 - * in EXCSAVE+XCHAL_DEBUGLEVEL before jumping here. - * - * This handler is used when no debugger is present. - * The end result of executing this default handler - * is as if no debug exception had occurred, eg. as if - * the core was running at PS.INTLEVEL >= DEBUGLEVEL. - * - * Because the debug exception vector might get - * placed in ROM, and be expected to work regardless - * of what executable image or OS is running in RAM, - * we're very careful not to use any RAM here. - * We don't know what RAM we can safely use. - * This tricky part to accomplishing this feat - * is to use only *one* register (a3, which was - * saved in EXCSAVE+XCHAL_DEBUGLEVEL), because we don't - * have RAM in which to safely store other regs. - * - * A real debugger application would normally - * have some kind of conventions, or special - * hardware support, to have its own RAM workspace - * in which to save context and do real work - * in this handler. - */ - - -#if XSHAL_DEBUG_VECTOR_ISROM - // Debug exception vector is in ROM, so place the handler - // in ROM also. Otherwise running different executables - // with that ROM will not work because the handler would - // likely not be there or be at the wrong address. - // - .section .srom.text, "ax" -#else - // Debug exception vector is in RAM, so we can safely - // place the handler in RAM as well. - // - .text -#endif - - .global xthal_debugexc_defhndlr_nw - .align 4 -xthal_debugexc_defhndlr_nw: - rsr.debugcause a3 // get cause of debug exception - - // Check for possible debug causes, in priority order. - // We only handle the highest priority condition present. - // (If there are multiple conditions, the lower priority - // condition(s) will normally trigger upon return from - // this exception handler.) - - bbci.l a3, DEBUGCAUSE_ICOUNT_SHIFT, 1f // ICOUNT trap? - movi a3, 0 - wsr.icount a3 // clear ICOUNT - j 3f - -/* - * Ensure that we have IBREAKs, otherwise the IBREAKENABLE - * special register is not there: - */ -#if XCHAL_NUM_IBREAK > 0 -1: bbci.l a3, DEBUGCAUSE_IBREAK_SHIFT, 1f // IBREAK match? - movi a3, 0 - wsr.ibreakenable a3 // disable IBREAK traps - j 3f -#endif - -/* Also check for DBREAK registers: */ -#if XCHAL_NUM_DBREAK > 0 -1: bbci.l a3, DEBUGCAUSE_DBREAK_SHIFT, 1f // DBREAK match? 
- movi a3, 0 - wsr.dbreakc0 a3 // disable DBREAK register 0 -# if XCHAL_NUM_DBREAK > 1 - wsr.dbreakc1 a3 // disable DBREAK register 1 -# endif - j 3f -#endif - -1: bbci.l a3, DEBUGCAUSE_BREAK_SHIFT, 1f // BREAK instruction? - //readsr epc XCHAL_DEBUGLEVEL a3 // get PC pointing to BREAK - //l8ui a3, a3, 1 // get first 4-bit operand of BREAK (in 2nd byte) - //extui a3, a3, (XCHAL_HAVE_BE*4), 4 // pos depends on endianness - //bnei a3, 1, 3f // is it a BREAK 1,x instruction? - readsr epc XCHAL_DEBUGLEVEL a3 // get PC pointing to BREAK - addi a3, a3, 3 // skip BREAK instruction - writesr epc XCHAL_DEBUGLEVEL a3 // update PC - j 3f - -1: bbci.l a3, DEBUGCAUSE_BREAKN_SHIFT, 1f // BREAK.N instruction? - readsr epc XCHAL_DEBUGLEVEL a3 // get PC pointing to BREAK - addi a3, a3, 2 // skip BREAK.N instruction - writesr epc XCHAL_DEBUGLEVEL a3 // update PC - j 3f - -1: bbci.l a3, DEBUGCAUSE_DEBUGINT_SHIFT, 1f // debug interrupt? - // Nothing to do... - j 3f - -1: // Unknown debug case? ignore - -3: readsr excsave XCHAL_DEBUGLEVEL a3 // restore a3 - rfi XCHAL_DEBUGLEVEL // return from debug exception - - .size xthal_debugexc_defhndlr_nw, . - xthal_debugexc_defhndlr_nw - - -#if XSHAL_DEBUG_VECTOR_ISROM - .text // in case this gets included by something else -#endif - -#endif /* XCHAL_HAVE_DEBUG */ - diff --git a/src/arch/xtensa/hal/disass.c b/src/arch/xtensa/hal/disass.c deleted file mode 100644 index 5f8d8517c551..000000000000 --- a/src/arch/xtensa/hal/disass.c +++ /dev/null @@ -1,153 +0,0 @@ -// -// disass.c - disassembly routines for Xtensa -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/disass.c#1 $ - -// Copyright (c) 2004-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include - -#ifdef XCHAL_OP0_FORMAT_LENGTHS -extern const unsigned char Xthal_op0_format_lengths[16]; -#endif -extern const unsigned char Xthal_byte0_format_lengths[256]; - - -#if defined(__SPLIT__op0_format_lengths) - -/* Instruction length in bytes as function of its op0 field (first nibble): */ -#ifdef XCHAL_OP0_FORMAT_LENGTHS -const unsigned char Xthal_op0_format_lengths[16] = { - XCHAL_OP0_FORMAT_LENGTHS -}; -#endif - - -#elif defined(__SPLIT__byte0_format_lengths) - -/* Instruction length in bytes as function of its first byte: */ -const unsigned char Xthal_byte0_format_lengths[256] = { - XCHAL_BYTE0_FORMAT_LENGTHS -}; - - -#elif defined(__SPLIT__disassemble_size) - -// -// Disassembly is currently not supported in xtensa hal. 
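xthal_disassemble_size() (just below) returns the byte length of the instruction at a given address by table lookup on the op0 nibble or the first byte, and xthal_disassemble() only formats the address and opcode bytes; the mnemonic field is emitted as "???". A minimal sketch of walking a code region with these two calls; dump_code() is a hypothetical wrapper and <xtensa/hal.h> is assumed to carry the declarations.

    #include <stdio.h>
    #include <xtensa/hal.h>      /* assumed home of the xthal_* declarations */

    void dump_code(unsigned char *buf, unsigned len)
    {
        char line[64];
        unsigned off = 0;

        while (off < len) {
            /* 2 or 3 bytes on most configurations (density option) */
            int n = xthal_disassemble_size(buf + off);

            xthal_disassemble(buf + off, buf + off, line, sizeof(line),
                              XTHAL_DISASM_OPT_ADDR | XTHAL_DISASM_OPT_OPHEX);
            printf("%s\n", line);
            off += (unsigned)n;
        }
    }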
-// - -int xthal_disassemble_size( unsigned char *instr_buf ) -{ -#ifdef XCHAL_OP0_FORMAT_LENGTHS - /* Extract op0 field of instruction (first nibble used for decoding): */ -# if XCHAL_HAVE_BE - int op0 = ((*instr_buf >> 4) & 0xF); -# else - int op0 = (*instr_buf & 0xF); -# endif - /*return (op0 & 8) ? 2 : 3;*/ /* simple case only works consistently on older hardware */ - return Xthal_op0_format_lengths[op0]; -#else - return Xthal_byte0_format_lengths[*instr_buf]; -#endif -} - - -#elif defined(__SPLIT__disassemble) - -/* - * Note: we make sure to avoid the use of library functions, - * to minimize dependencies. - */ -int xthal_disassemble( - unsigned char *instr_buffer, /* the address of the instructions */ - void *tgt_address, /* where the instruction is to be */ - char *buffer, /* where the result goes */ - unsigned buflen, /* size of buffer */ - unsigned options /* what to display */ - ) -{ -#define OUTC(c) do{ if( p < endp ) *p = c; p++; }while(0) - int i, n; - char *p = buffer, *endp = buffer + buflen - 1; - /*static char *ret = " decoding not supported";*/ - static const char _hexc[16] = "0123456789ABCDEF"; - - n = xthal_disassemble_size( instr_buffer ); - - if( options & XTHAL_DISASM_OPT_ADDR ) { - unsigned addr = (unsigned)tgt_address; - for( i = 0; i < 8; i++ ) { - OUTC( _hexc[(addr >> 28) & 0xF] ); - addr <<= 4; - } - } - - if( options & XTHAL_DISASM_OPT_OPHEX ) { - if( p > buffer ) - OUTC( ' ' ); - for( i = 0; i < 3; i++ ) { - if( i < n ) { - OUTC( _hexc[(*instr_buffer >> 4) & 0xF] ); - OUTC( _hexc[*instr_buffer++ & 0xF] ); - } else { - OUTC( ' ' ); - OUTC( ' ' ); - } - OUTC( ' ' ); - } - } - - if( options & XTHAL_DISASM_OPT_OPCODE ) { - if( p > buffer ) - OUTC( ' ' ); - OUTC( '?' ); - OUTC( '?' ); - OUTC( '?' ); - OUTC( ' ' ); - OUTC( ' ' ); - OUTC( ' ' ); - OUTC( ' ' ); - } - - if( options & XTHAL_DISASM_OPT_PARMS ) { - if( p > buffer ) - OUTC( ' ' ); - OUTC( '?' ); - OUTC( '?' ); - OUTC( '?' ); - } - - if( p < endp ) - *p = 0; - else if( buflen > 0 ) - *endp = 0; - - return p - buffer; /* return length needed, even if longer than buflen */ -} - -#undef OUTC - - -#endif /*split*/ diff --git a/src/arch/xtensa/hal/int_asm.S b/src/arch/xtensa/hal/int_asm.S deleted file mode 100644 index 4ce36da45a26..000000000000 --- a/src/arch/xtensa/hal/int_asm.S +++ /dev/null @@ -1,643 +0,0 @@ -// -// int_asm.S - assembly language interrupt utility routines -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/int_asm.S#1 $ - -// Copyright (c) 2003-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - -#if XCHAL_HAVE_INTERRUPTS -/* Offsets of XtHalVPriState structure members (Xthal_vpri_state variable): */ -#define XTHAL_VPRI_VPRI_OFS 0x00 -#define XTHAL_VPRI_LOCKLEVEL_OFS 0x01 -#define XTHAL_VPRI_LOCKVPRI_OFS 0x02 -#define XTHAL_VPRI_PAD0_OFS 0x03 -#define XTHAL_VPRI_ENABLED_OFS 0x04 -#define XTHAL_VPRI_LOCKMASK_OFS 0x08 -#define XTHAL_VPRI_PAD1_OFS 0x0C -#define XTHAL_VPRI_ENABLEMAP_OFS 0x10 -#define XTHAL_VPRI_RESOLVEMAP_OFS (0x10+0x40*(XCHAL_NUM_INTLEVELS+1)) -#define XTHAL_VPRI_END_OFS (0x10+0x40*(XCHAL_NUM_INTLEVELS*2+1)) -#endif /* XCHAL_HAVE_INTERRUPTS */ - - -#if defined(__SPLIT__get_intenable) || \ - defined(__SPLIT__get_intenable_nw) - -//---------------------------------------------------------------------- -// Access INTENABLE register from C -//---------------------------------------------------------------------- - -// unsigned xthal_get_intenable(void) -// -DECLFUNC(xthal_get_intenable) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - rsr.intenable a2 -# else - movi a2, 0 // if no INTENABLE (no interrupts), tell caller nothing is enabled -# endif - abi_return - endfunc - -#endif - -#if defined(__SPLIT__set_intenable) || \ - defined(__SPLIT__set_intenable_nw) - -// void xthal_set_intenable(unsigned) -// -DECLFUNC(xthal_set_intenable) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - wsr.intenable a2 -# endif - abi_return - endfunc - - -//---------------------------------------------------------------------- -// Access INTERRUPT, INTSET, INTCLEAR register from C -//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__get_interrupt) || \ - defined (__SPLIT__get_interrupt_nw) - -// unsigned xthal_get_interrupt(void) -// -DECLFUNC (xthal_get_interrupt) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - rsr.interrupt a2 -# else - movi a2, 0 // if no INTERRUPT (no interrupts), tell caller nothing is pending -# endif - abi_return - endfunc - -#endif - -#if defined(__SPLIT__get_intread) || \ - defined(__SPLIT__get_intread_nw) - -DECLFUNC (xthal_get_intread) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - rsr.interrupt a2 -# else - movi a2, 0 // if no INTERRUPT (no interrupts), tell caller nothing is pending -# endif - abi_return - endfunc - -#endif - -#if defined(__SPLIT__set_intset) || \ - defined(__SPLIT__set_intset_nw) - -// void xthal_set_intset(unsigned) -// -DECLFUNC(xthal_set_intset) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - wsr.intset a2 -# endif - abi_return - endfunc - -#endif - -#if defined(__SPLIT__set_intclear) || \ - defined(__SPLIT__set_intclear_nw) - -// void xthal_set_intclear(unsigned) -// -DECLFUNC(xthal_set_intclear) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - wsr.intclear a2 -# endif - abi_return - endfunc - - - -//---------------------------------------------------------------------- -// Virtual PS.INTLEVEL support: -// allows running C code at virtual PS.INTLEVEL > 0 -// using INTENABLE to simulate the masking that PS.INTLEVEL would do. 
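The virtual-priority routines that follow raise or lower a software interrupt priority by rewriting INTENABLE (the globally enabled mask ANDed with a per-priority enable map) while PS.INTLEVEL itself is left alone. A minimal usage sketch of the C-callable entry points; update_shared_state() and the level number 2 are illustrative only, and <xtensa/hal.h> is assumed to declare the calls.

    #include <xtensa/hal.h>

    void update_shared_state(void)
    {
        /* Virtually mask everything at interrupt level 2 and below; only
         * INTENABLE changes, PS.INTLEVEL stays where it was. */
        unsigned old_vpri = xthal_set_vpri_intlevel(2);

        /* ... touch data shared with level-1 and level-2 interrupt handlers ... */

        xthal_set_vpri(old_vpri);   /* drop back to the caller's virtual priority */
    }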
-//---------------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__get_vpri) ||\ - defined(__SPLIT__get_vpri_nw) - -// unsigned xthal_get_vpri(void); - -DECLFUNC(xthal_get_vpri) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - movi a2, Xthal_vpri_state - l8ui a2, a2, XTHAL_VPRI_VPRI_OFS -# else - movi a2, 0 // no interrupts, report we're always at level 0 -# endif - abi_return - endfunc - -#endif - -#if defined(__SPLIT__set_vpri_nw) - -// unsigned xthal_set_vpri_nw(unsigned) -// -// Must be called at PS.INTLEVEL <= 1. -// Doesn't touch the stack (doesn't reference a1 at all). -// Normally, PS should be restored with a6 after return from this call -// (it isn't restored automatically because some exception handlers -// want to keep ints locked for a while). -// -// On entry: -// a2 = new virtual interrupt priority (0x00 .. 0x1F) -// a3-a6 = undefined -// PS.INTLEVEL <= 1 -// On exit: -// a2 = previous virtual interrupt priority (0x0F .. 0x1F, or 0 if no interrupts) -// a3-a5 = clobbered -// a6 = PS as it was on entry -// PS.INTLEVEL = 1 -// !!!!!!!!! PS.WOE = 0 (but not if there are no interrupts; is this really needed???) -// INTENABLE = updated according to new vpri - -_SYM(xthal_set_vpri_nw) - -# if XCHAL_HAVE_INTERRUPTS - /* Make sure a2 is in the range 0x0F .. 0x1F: */ - movi a3, 0x1F // highest legal virtual interrupt priority - sub a4, a2, a3 // (a4 = newlevel - maxlevel) - movgez a2, a3, a4 // newlevel = maxlevel if (newlevel - maxlevel) >= 0 - movi a3, 15 // lowest legal virtual interrupt priority - sub a4, a2, a3 // (a4 = newlevel - 15) - movltz a2, a3, a4 // newlevel = 15 if newlevel < 15 - -xthal_set_vpri_nw_common: - movi a4, Xthal_vpri_state // address of vpri state structure - - /* - * Lockout interrupts for exclusive access to virtual priority structure - * while we examine and modify it. - * Note that we accessed a4 and don't access any further than a6, - * so we won't cause any spills, so we could leave WOE enabled (if it is), - * but we clear it because that might be what the caller wants, - * and is cleaner. - */ - // Get PS and mask off INTLEVEL: - rsil a6, 1 // save a6 = PS, set PS.INTLEVEL = 1 - - // Clear PS.WOE. (Can we get rid of this?!!!!!): - movi a3, ~0x00040000 // mask to... - rsr.ps a5 // get and save a6 = PS -//a2,a3,a4,a5,a6 - and a5, a5, a3 // ... clear a5.WOE - wsr.ps a5 // clear PS.WOE - rsync - -//a2,a4,a6 - /* Get mask of interrupts to be turned off at requested level: */ - l32i a5, a4, XTHAL_VPRI_ENABLED_OFS // get the global mask - addx4 a3, a2, a4 // a3 = a4 + a2*4 (index into enablemap[] array) -//a2,a3,a4,a5,a6 - l32i a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // get the per-level mask - and a3, a5, a3 // new INTENABLE value according to new intlevel - wsr.intenable a3 // set it! -//a2,a4,a6 - - l8ui a5, a4, XTHAL_VPRI_VPRI_OFS // previous virtual priority - s8i a2, a4, XTHAL_VPRI_VPRI_OFS // new virtual priority - - // Let the caller restore PS: - //wsr.ps a6 // restore PS.INTLEVEL - //rsync - - mov a2, a5 // return previous virtual intlevel - -# else /* ! XCHAL_HAVE_INTERRUPTS */ -xthal_set_vpri_nw_common: -# if XCHAL_HAVE_EXCEPTIONS - rsr.ps a6 // return PS for caller to restore -# else - movi a6, 0 -# endif - movi a2, 0 // no interrupts, report we're always at virtual priority 0 -# endif /* XCHAL_HAVE_INTERRUPTS */ - ret - endfunc - - - -// unsigned xthal_set_vpri_intlevel_nw(unsigned); -// -// Same as xthal_set_vpri_nw() except that it accepts -// an interrupt level rather than a virtual interrupt priority. 
-// This just converts intlevel to vpri and jumps to xthal_set_vpri_nw. - -_SYM(xthal_set_vpri_intlevel_nw) -# if XCHAL_HAVE_INTERRUPTS - movi a3, 0x10 - movnez a2, a3, a2 // a2 = (a2 ? 0x10 : 0) - addi a2, a2, 0x0F // a2 += 0x0F -# endif - j xthal_set_vpri_nw_common // set vpri to a2 - endfunc - - - -#endif - -#if defined(__SPLIT__set_vpri) - -// unsigned xthal_set_vpri (unsigned newvpri); -// -// Normal windowed call (PS.INTLEVEL=0 and PS.WOE=1 on entry and exit). -// (PS.UM = 0 or 1) -// -// Returns previous virtual interrupt priority -// (0x0F .. 0x1F, or 0 if no interrupts). -// -// On entry: -// a2 = new virtual interrupt priority (0x00 .. 0x1F) -// On exit: -// a2 = previous vpri -// INTENABLE = updated according to new vpri - -DECLFUNC(xthal_set_vpri) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - /* Make sure a2 is in the range 0x0F .. 0x1F: */ - movi a3, 0x1F // highest legal virtual interrupt priority - sub a4, a2, a3 // (a4 = newlevel - maxlevel) - movgez a2, a3, a4 // newlevel = maxlevel if (newlevel - maxlevel) >= 0 - movi a3, 15 // lowest legal virtual interrupt priority - sub a4, a2, a3 // (a4 = newlevel - 15) - movltz a2, a3, a4 // newlevel = 15 if newlevel < 15 - -xthal_set_vpri_common1: - movi a4, Xthal_vpri_state // address of vpri state structure - - /* - * Lockout interrupts for exclusive access to virtual priority structure - * while we examine and modify it. - * Note that we accessed a4 and don't access any further than a6, - * so we won't cause any spills, so we can leave WOE enabled. - */ - // Get PS and mask off INTLEVEL: - rsil a6, 1 // save a6 = PS, set PS.INTLEVEL = 1 - - l8ui a7, a4, XTHAL_VPRI_VPRI_OFS // previous virtual priority (vpri) - - /* Get mask of interrupts to be turned off at requested level: */ - l32i a5, a4, XTHAL_VPRI_ENABLED_OFS // get the global mask - addx4 a3, a2, a4 // a3 = a4 + a2*4 (index into enablemap[] array) - l32i a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // get the per-level mask - s8i a2, a4, XTHAL_VPRI_VPRI_OFS // new virtual priority (in load-slot) - and a3, a5, a3 // new INTENABLE value according to new intlevel - wsr.intenable a3 // set it! - - wsr.ps a6 // restore PS.INTLEVEL - rsync - - mov a2, a7 // return previous vpri - -# else /* ! XCHAL_HAVE_INTERRUPTS */ - movi a2, 0 // no interrupts, report we're always at virtual priority 0 -# endif /* XCHAL_HAVE_INTERRUPTS */ - abi_return - endfunc - - - -// unsigned xthal_set_vpri_intlevel (unsigned intlevel); -// -// Equivalent to xthal_set_vpri(XTHAL_VPRI(intlevel,0xF)). -// This just converts intlevel to vpri and jumps inside xthal_set_vpri. - -DECLFUNC(xthal_set_vpri_intlevel) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - movi a3, 0x10 - movnez a2, a3, a2 // a2 = (a2 ? 0x10 : 0) - addi a2, a2, 0x0F // a2 += 0x0F - j xthal_set_vpri_common1 // set vpri to a2 -# else - movi a2, 0 // no interrupts, report we're always at virtual priority 0 - abi_return -# endif - endfunc - - - -// unsigned xthal_set_vpri_lock (void); -// -// Equivalent to xthal_set_vpri(0x1F); -// Returns previous virtual interrupt priority. 
-// -DECLFUNC(xthal_set_vpri_lock) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - movi a2, 0x1F // lock at intlevel 1 - j xthal_set_vpri_common1 -# else - movi a2, 0 // no interrupts, report we're always at virtual priority 0 - abi_return -# endif - endfunc - - -#endif - -#if defined(__SPLIT__get_intpending_nw) - -// unsigned xthal_get_intpending_nw(void) -// -// Of the pending level-1 interrupts, returns -// the bitmask of interrupts at the highest software priority, -// and the index of the first of these. -// It also disables interrupts of that software priority and lower -// via INTENABLE. -// -// On entry: -// a0 = return PC -// a1 = sp -// a2-a6 = (available) (undefined) -// PS.INTLEVEL = 1 -// PS.WOE = 0 -// On exit: -// a0 = return PC -// a1 = sp (NOTE: stack is untouched, a1 is never referenced) -// a2 = index of first highest-soft-pri pending l1 interrupt (0..31), or -1 if none -// a3 = bitmask of highest-soft-pri pending l1 interrupts (0 if none) (may be deprecated) -// a4 = (clobbered) -// a5 = new vpri (not typically used by caller? so might get deprecated...?) -// a6 = old vpri (eg. to be saved as part of interrupt context's state) -// INTENABLE = updated according to new vpri -// INTERRUPT bit cleared for interrupt returned in a2 (if any), if software or edge-triggered or write-error -// all others = preserved - -_SYM(xthal_get_intpending_nw) -# if XCHAL_HAVE_INTERRUPTS - // Give us one more register to play with - //wsr.excsave1 a4 - - // Figure out which interrupt to process - - /* - Perform a binary search to find a mask of the interrupts that are - ready at the highest virtual priority level. - Xthal_vpri_resolvemap is a binary tree implemented within an array, - sorted by priority: each node contains the set of interrupts in - the range of priorities corresponding to the right half of its branch. - The mask of enabled & pending interrupts is compared with each node to - determine in which subbranch (left or right) the highest priority one is - present. After 4 such masks and comparisons (for 16 priorities), we have - determined the priority of the highest priority enabled&pending interrupt. - - Table entries for intlevel 'i' are bitmasks defined as follows (map=Xthal_vpri_resolvemap[i-1]): - map[8+(x=0)] = ints at pri x + 8..15 (8-15) - map[4+(x=0,8)] = ints at pri x + 4..7 (4-7,12-15) - map[2+(x=0,4,8,12)] = ints at pri x + 2..3 (2-3,6-7,10-11,14-15) - map[1+(x=0,2..12,14)] = ints at pri x + 1 (1,3,5,7,9,11,13,15) - map[0] = 0 (unused; for alignment) - */ - - rsr.interrupt a4 // a4 = mask of interrupts pending, including those disabled - rsr.intenable a2 // a2 = mask of interrupts enabled - movi a3, Xthal_vpri_state - and a4, a2, a4 // a4 = mask of enabled interrupts pending - beqz a4, gipfail // if none (can happen for spurious level-triggered interrupts, - // or ???), we're done - - mov a5, a3 - l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+8*4 - bnone a2, a4, 1f - addi a5, a5, 8*4 -1: l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+4*4 - bnone a2, a4, 1f - addi a5, a5, 4*4 -1: l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+2*4 - bnone a2, a4, 1f - addi a5, a5, 2*4 -1: l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+1*4 - bnone a2, a4, 1f - addi a5, a5, 1*4 -1: - -# if 0 - a5 = address of map ... 
- l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+8*4 - addi a?, a5, 8*4 - and a2, a2, a4 - movnez a5, a?, a2 - l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+4*4 - addi a?, a5, 4*4 - and a2, a2, a4 - movnez a5, a?, a2 - l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+2*4 - addi a?, a5, 2*4 - and a2, a2, a4 - movnez a5, a?, a2 - l32i a2, a5, XTHAL_VPRI_RESOLVEMAP_OFS+1*4 - addi a?, a5, 1*4 - and a2, a2, a4 - movnez a5, a?, a2 -# endif - - // Here: - // a3 = Xthal_vpri_state - // a5 = Xthal_vpri_state + softpri*4 - // a4 = mask of enabled interrupts pending - // a2,a6 = available - - // Lock interrupts during virtual priority data structure transaction: - //rsil a6, 1 // set PS.INTLEVEL = 1 (a6 ignored) - // a2,a6 = available - - // The highest priority interrupt(s) in a4 is at softpri = (a5-a3) / 4. - // So interrupts in enablemap[1][softpri] are not in a4 (they are higher priority). - // The set of interrupts at softpri are: - // enablemap[1][softpri-1] - enablemap[1][softpri] - // So and'ing a4 with enablemap[1][softpri - 1] will give us - // the set of interrupts pending at the highest soft priority. - // - l32i a2, a5, XTHAL_VPRI_ENABLEMAP_OFS + 16*4 - 4 // get enablemap[1][softpri-1] - and a4, a2, a4 // only keep interrupts of highest pri (softpri) - - // a4 now has mask of pending interrupts at highest ready level (new vpri) - - // Update INTENABLE for this new virtual priority - l32i a2, a5, XTHAL_VPRI_ENABLEMAP_OFS + 16*4 // get vpri-specific mask = enablemap[1][softpri] - l32i a6, a3, XTHAL_VPRI_ENABLED_OFS // get global mask - sub a5, a5, a3 // a5 = softpri * 4 (for below; here for efficiency) - and a2, a2, a6 // and together - wsr.intenable a2 // disable interrupts at or below new vpri - // a2,a6 = available - - // Update new virtual priority: - l8ui a6, a3, XTHAL_VPRI_VPRI_OFS // get old vpri (returned) - srli a5, a5, 2 // a5 = softpri (0..15) - addi a5, a5, 0x10 // a5 = 0x10 + softpri = new virtual priority - s8i a5, a3, XTHAL_VPRI_VPRI_OFS // store new vpri (returned) - - // Undo the temporary lock (if was at PS.INTLEVEL > 1): - //rsil a2, 1 - - mov a3, a4 // save for the caller (in case it wants it?) - - // Choose one of the set of highest-vpri pending interrupts to process. - // For speed (and simplicity), use this simple two-instruction sequence - // to select the least significant bit set in a4. This implies that - // interrupts with a lower interrupt number take precedence over those - // with a higher interrupt number (!!). - // - neg a2, a4 // keep only the least-significant bit that is set... - and a4, a2, a4 // ... in a4 - - // Software, edge-triggered, and write-error interrupts are cleared by writing to the - // INTCLEAR pseudo-reg (to clear relevant bits of the INTERRUPT register). - // To simplify interrupt handlers (so they avoid tracking which type of - // interrupt they handle and act accordingly), clear such interrupts here. - // To avoid race conditions, the clearing must occur *after* we undertake - // to process the interrupt, and *before* actually handling the interrupt. - // Interrupt handlers may additionally clear the interrupt themselves - // at appropriate points if needed to avoid unnecessary interrupts. - // -#define CLEARABLE_INTLEVEL1_MASK (XCHAL_INTLEVEL1_MASK & XCHAL_INTCLEARABLE_MASK) -# if CLEARABLE_INTLEVEL1_MASK != 0 - //movi a2, CLEARABLE_INTLEVEL1_MASK - //and a2, a2, a4 - //wsr.intclear a2 - wsr.intclear a4 // no effect if a4 not a software or edge-triggered or write-error interrupt -# endif - - // Convert the single-bit interrupt mask to an interrupt number. 
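The selection step here isolates the least-significant pending bit with the two-instruction neg/and sequence and then converts that single bit into an interrupt number (find_ms_setbit, typically backed by NSAU when it is configured). The same logic in plain C, purely for reference; lowest_pending_int() is a hypothetical name.

    /* C equivalent of the "neg; and" + find_ms_setbit sequence: pick the
     * lowest-numbered pending interrupt out of a bitmask. */
    static int lowest_pending_int(unsigned mask)
    {
        int n = 0;

        if (mask == 0)
            return -1;             /* nothing pending, like the gipfail path */
        mask &= ~mask + 1;         /* keep only the least-significant set bit */
        while ((mask & 1) == 0) {  /* the assembly uses NSAU instead of a loop */
            mask >>= 1;
            n++;
        }
        return n;
    }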
- // (ie. compute log2 using either the NSAU instruction or a binary search) - - find_ms_setbit a2, a4, a2, 0 // set a2 to index of lsbit set in a4 (0..31) - // NOTE: assumes a4 != 0 (otherwise a2 is undefined[?]) - - // a2 has vector number (0..31) - - //rsr.excsave1 a4 - ret - -gipfail: - l8ui a6, a3, XTHAL_VPRI_VPRI_OFS // get old vpri - mov a5, a6 // is also new vpri (unchanged) -# else /* XCHAL_HAVE_INTERRUPTS */ - // No interrupts configured! - movi a5, 0 // return zero new vpri - movi a6, 0 // return zero old vpri -# endif /* XCHAL_HAVE_INTERRUPTS */ - movi a2, -1 // return bogus vector number (eg. can be quickly tested for negative) - movi a3, 0 // return zero bitmask of interrupts pending - ret - endfunc - -// ----------------------------------------------------------------- - -#endif - -#if defined(__SPLIT__vpri_lock) || \ - defined(__SPLIT__vpri_lock_nw) - -// void xthal_vpri_lock() -// -// Used internally by the Core HAL to block interrupts of higher or equal -// priority than Xthal_vpri_locklevel during virtual interrupt operations. -// -DECLFUNC(xthal_vpri_lock) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - rsil a6, 1 // save a6 = PS, set PS.INTLEVEL = 1 - - // if( Xthal_vpri_level < Xthal_vpri_locklevel ) - // - movi a2, Xthal_vpri_state // a2 := address of global var. Xthal_vpri_state - //interlock - l8ui a3, a2, XTHAL_VPRI_VPRI_OFS // a3 := Xthal_vpri_level == Xthal_vpri_state.vpri - l8ui a5, a2, XTHAL_VPRI_LOCKLEVEL_OFS // a5 := Xthal_vpri_locklevel - l32i a4, a2, XTHAL_VPRI_ENABLED_OFS // a4 := Xthal_vpri_enabled - bgeu a3, a5, xthal_vpri_lock_done - - // xthal_set_intenable( Xthal_vpri_enablemap[0][Xthal_vpri_locklevel] & Xthal_vpri_enabled ); - // - addx4 a3, a5, a2 // a3 := a2 + a5*4 (index into enablemap[] array) - l32i a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // a3 := Xthal_vpri_enablemap[0][Xthal_vpri_locklevel] - //interlock - and a2, a4, a3 - wsr.intenable a2 - -xthal_vpri_lock_done: - wsr.ps a6 // restore PS.INTLEVEL - rsync -# endif - abi_return - endfunc - -#endif - -#if defined(__SPLIT__vpri_unlock) || \ - defined(__SPLIT__vpri_unlock_nw) - -// void xthal_vpri_unlock(void) -// -// Enable interrupts according to the current virtual interrupt priority. -// This effectively "unlocks" interrupts disabled by xthal_vpri_lock() -// (assuming the virtual interrupt priority hasn't changed). -// -DECLFUNC(xthal_vpri_unlock) - abi_entry -# if XCHAL_HAVE_INTERRUPTS - // - // This should be free of race-conditions. - // - // xthal_set_intenable( Xthal_vpri_enablemap[0][Xthal_vpri_level] & Xthal_vpri_enabled ); - // - movi a2, Xthal_vpri_state // a2 := address of global var. Xthal_vpri_state - //interlock - l8ui a3, a2, XTHAL_VPRI_VPRI_OFS // a3 := Xthal_vpri_level == Xthal_vpri_state.vpri - l32i a4, a2, XTHAL_VPRI_ENABLED_OFS // a4 := Xthal_vpri_enabled - addx4 a3, a3, a2 // a3 := a2 + a3*4 (index into enablemap[] array) - l32i a3, a3, XTHAL_VPRI_ENABLEMAP_OFS // a3 := Xthal_vpri_enablemap[0][Xthal_vpri_level] - //interlock - and a2, a4, a3 - wsr.intenable a2 -# endif - abi_return - endfunc - -#endif /*SPLIT*/ - diff --git a/src/arch/xtensa/hal/interrupts.c b/src/arch/xtensa/hal/interrupts.c deleted file mode 100644 index 1bf8bd99c559..000000000000 --- a/src/arch/xtensa/hal/interrupts.c +++ /dev/null @@ -1,850 +0,0 @@ -// -// interrupts.c - interrupts related constants and functions -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/interrupts.c#1 $ - -// Copyright (c) 2002-2004 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -#if XCHAL_HAVE_INTERRUPTS - -/* For internal use by the HAL: */ -// static void xthal_vpri_lock(void); -// static void xthal_vpri_unlock(void); -extern void xthal_vpri_lock(void); -extern void xthal_vpri_unlock(void); - - -/* - * Definitions: - * - * Virtual interrupt level = 0 .. 0xFF - * - * ... - */ - -#define XTHAL_DEFAULT_SOFTPRI 4 /* default software priority (range 0..15) */ - /* IMPORTANT: if you change this, you also - need to update the initial resolvemap[] - value below... */ - -/* - * Macros to convert between: - * intlevel (0..15) and software priority within an intlevel (0..15) - * and - * virtual interrupt priority (0..0xFF), which is a combination of the above two. - */ -#define XTHAL_VPRI_INTLEVEL(vpri) (((vpri) >> 4) & 0xF) -#define XTHAL_VPRI_SOFTPRI(vpri) ((vpri) & 0xF) -#define XTHAL_VPRI(intlevel,softpri) ((((intlevel)&0xF)<<4)|((softpri)&0xF)) - - -/* - * Virtual priority management data structures. - * This structure is instantiated as Xthal_vpri_state (below). - * - * IMPORTANT: if you change anything in this structure, - * you must accordingly change structure offsets - * defined in int_asm.S . - * - * IMPORTANT: the worst-case offset of the resolvemap[] field is 976 bytes - * (0x10 + 0x40*15), which is accessed in int_asm.S at a further - * offset of 8*4==32 for a total offset of 1008, very close - * to l32i's offset limit of 1020. So you can't push it much - * further. - * - * [INTERNAL NOTE: There might be a trick that will save 64 bytes, - * if really needed, by trimming 15 word entries from the start - * of enablemap[] ... -MG] - */ -typedef struct XtHalVPriState { - /* - * Current virtual interrupt priority (0x0F .. 0xFF) - * (or actually, 0x0F .. XCHAL_NUM_INTLEVELS*0x10+0x0F). - * Virtual priorities 0x00 to 0x0E are mapped to 0x0F (they're all - * equivalent, because there's no such thing as a level 0 interrupt), - * which may help optimize the size of enablemap[] in the future. - * Virtual priorities above XCHAL_NUM_INTLEVELS*0x10+0x0F are - * mapped to XCHAL_NUM_INTLEVELS*0x10+0x0F, which is equivalent. - * - * NOTE: this variable is actually part of the processor context, - * which means (for most OSes) that it must be saved - * in the task control block along with other register state. 
- */ - unsigned char vpri; // current virtual interrupt priority (0x0F..0xFF) - unsigned char locklevel; // real interrupt level used to get exclusive - // access to this structure; MUST be at least one (1) - unsigned char lockvpri; // virtual interrupt level used to get exclusive - // access to this structure; MUST be XTHAL_VPRI(locklevel,15) - // (so it's at least 0x1F); placed here for efficiency - unsigned char pad0; // (alignment padding, unused) - - unsigned enabled; // mask of which interrupts are enabled, regardless of level - // (level masking is applied on top of this) - - unsigned lockmask; // (unused?) INTENABLE value used to lock out - // interrupts for exclusive access to this structure - - unsigned pad1; // (alignment padding, unused) - - /* - * For each virtual interrupt priority, this array provides the - * bitmask of interrupts of greater virtual priority - * (ie. the set of interrupts to enable at that virtual priority, - * if all interrupts were enabled in field 'enabled'). - */ - unsigned enablemap[XCHAL_NUM_INTLEVELS+1][16]; - - /* - * Table entries for intlevel 'i' are bitmasks defined as follows, - * with map == Xthal_vpri_resolvemap[i-1]: - * map[8+(x=0)] = ints at pri x + 8..15 (8-15) - * map[4+(x=0,8)] = ints at pri x + 4..7 (4-7,12-15) - * map[2+(x=0,4,8,12)] = ints at pri x + 2..3 (2-3,6-7,10-11,14-15) - * map[1+(x=0,2..12,14)] = ints at pri x + 1 (1,3,5,7,9,11,13,15) - * map[0] = 0 (unused; for alignment) - */ - unsigned resolvemap[XCHAL_NUM_INTLEVELS][16]; - -} XtHalVPriState; - - -extern XtHalVPriState Xthal_vpri_state; -extern unsigned char Xthal_int_vpri[32]; -extern XtHalVoidFunc * Xthal_tram_trigger_fn; - -extern void xthal_null_func(void); - -/* Shorthand for structure members: */ -#define Xthal_vpri_level Xthal_vpri_state.vpri -#define Xthal_vpri_locklevel Xthal_vpri_state.locklevel -#define Xthal_vpri_lockvpri Xthal_vpri_state.lockvpri -#define Xthal_vpri_enabled Xthal_vpri_state.enabled -#define Xthal_vpri_lockmask Xthal_vpri_state.lockmask // unused? 
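For reference, the XTHAL_VPRI_* macros defined earlier in this file pack the hardware interrupt level into the high nibble of a virtual priority and the software sub-priority into the low nibble. A tiny worked example, with level 3 and software priority 4 chosen arbitrarily; it assumes it lives in the same translation unit as those local macros.

    unsigned vpri  = XTHAL_VPRI(3, 4);            /* = 0x34 */
    unsigned level = XTHAL_VPRI_INTLEVEL(vpri);   /* = 3, hardware interrupt level */
    unsigned soft  = XTHAL_VPRI_SOFTPRI(vpri);    /* = 4, priority within that level */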
-#define Xthal_vpri_enablemap Xthal_vpri_state.enablemap -#define Xthal_vpri_resolvemap Xthal_vpri_state.resolvemap -#if 0 -Combined refs: - - enablemap, vpri, enabled (xthal_set_vpri[_nw]) - - enablemap, vpri, enabled, resolvemap (xthal_get_intpending_nw) - - enablemap, vpri, enabled, locklevel (xthal_vpri_lock) - - enablemap, vpri, enabled (xthal_vpri_unlock) -#endif - -#endif /* XCHAL_HAVE_INTERRUPTS */ - - - -#if defined(__SPLIT__num_intlevels) - -// the number of interrupt levels -const unsigned char Xthal_num_intlevels = XCHAL_NUM_INTLEVELS; - -#endif - -#if defined(__SPLIT__num_interrupts) - -// the number of interrupts -const unsigned char Xthal_num_interrupts = XCHAL_NUM_INTERRUPTS; - -#endif - -#if defined(__SPLIT__excm_level) - -// the highest level of interrupts masked by PS.EXCM (if XEA2) -const unsigned char Xthal_excm_level = XCHAL_EXCM_LEVEL; - -#endif - -#if defined(__SPLIT__intlevel_mask) - -// mask of interrupts at each intlevel -const unsigned Xthal_intlevel_mask[16] = { - XCHAL_INTLEVEL_MASKS -}; - -#endif - -#if defined(__SPLIT__intlevel_andbelow_mask) - -// mask for level 1 to N interrupts -const unsigned Xthal_intlevel_andbelow_mask[16] = { - XCHAL_INTLEVEL_ANDBELOW_MASKS -}; - -#endif - -#if defined(__SPLIT__intlevel) - -// level per interrupt -const unsigned char Xthal_intlevel[32] = { - XCHAL_INT_LEVELS -}; - -#endif - -#if defined(__SPLIT__inttype) - -// type of each interrupt -const unsigned char Xthal_inttype[32] = { - XCHAL_INT_TYPES -}; - -#endif - -#if defined(__SPLIT__inttype_mask) - -const unsigned Xthal_inttype_mask[XTHAL_MAX_INTTYPES] = { - XCHAL_INTTYPE_MASKS -}; - -#endif - -#if defined(__SPLIT__timer_interrupt) - -// interrupts assigned to each timer (CCOMPARE0 to CCOMPARE3), -1 if unassigned -const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS] = { - XCHAL_TIMER_INTERRUPTS -}; - -#endif - -#if defined(__SPLIT__vpri) - -#if XCHAL_HAVE_INTERRUPTS - -/* - * Note: this structure changes dynamically at run-time, - * but is initialized here for efficiency and simplicity, - * according to configuration. - */ -XtHalVPriState Xthal_vpri_state = { - 0x00, /* vpri */ - 1, /* locklevel */ - 0x1F, /* lockvpri */ - 0, /* pad0 */ - 0x00000000, /* enabled */ - 0x00000000, /* lockmask (unused?) */ - 0, /* pad1 */ - -#define DEFAULT_ENABLEMAP(levela,levelb) \ - { (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 0 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 1 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 2 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 3 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 4 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 5 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 6 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 7 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 8 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI > 9 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI >10 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI >11 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI >12 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI >13 ? 
levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI >14 ? levela : levelb)), \ - (XCHAL_INTLEVEL15_ANDBELOW_MASK & ~(XTHAL_DEFAULT_SOFTPRI >15 ? levela : levelb)) } - - /* Xthal_vpri_enablemap[XCHAL_NUM_INTLEVELS+1][16]: */ - { - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL0_ANDBELOW_MASK,XCHAL_INTLEVEL0_ANDBELOW_MASK), -#if XCHAL_NUM_INTLEVELS >= 1 - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL0_ANDBELOW_MASK,XCHAL_INTLEVEL1_ANDBELOW_MASK), -#endif -#if XCHAL_NUM_INTLEVELS >= 2 - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL1_ANDBELOW_MASK,XCHAL_INTLEVEL2_ANDBELOW_MASK), -#endif -#if XCHAL_NUM_INTLEVELS >= 3 - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL2_ANDBELOW_MASK,XCHAL_INTLEVEL3_ANDBELOW_MASK), -#endif -#if XCHAL_NUM_INTLEVELS >= 4 - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL3_ANDBELOW_MASK,XCHAL_INTLEVEL4_ANDBELOW_MASK), -#endif -#if XCHAL_NUM_INTLEVELS >= 5 - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL4_ANDBELOW_MASK,XCHAL_INTLEVEL5_ANDBELOW_MASK), -#endif -#if XCHAL_NUM_INTLEVELS >= 6 - DEFAULT_ENABLEMAP(XCHAL_INTLEVEL5_ANDBELOW_MASK,XCHAL_INTLEVEL6_ANDBELOW_MASK), -#endif -#if XCHAL_NUM_INTLEVELS >= 7 -# error Interrupt levels greater than 6 not currently supported in the HAL interrupt routines. -#endif - }, - - /* Xthal_vpri_resolvemap[XCHAL_NUM_INTLEVELS][16]: */ - { -#if XCHAL_NUM_INTLEVELS >= 1 /* set for default soft priority of 4: */ - {0,0,0,0, XCHAL_INTLEVEL1_MASK,0,0,0, 0,0,0,0, 0,0,0,0}, -#endif -#if XCHAL_NUM_INTLEVELS >= 2 /* set for default soft priority of 4: */ - {0,0,0,0, XCHAL_INTLEVEL2_MASK,0,0,0, 0,0,0,0, 0,0,0,0}, -#endif -#if XCHAL_NUM_INTLEVELS >= 3 /* set for default soft priority of 4: */ - {0,0,0,0, XCHAL_INTLEVEL3_MASK,0,0,0, 0,0,0,0, 0,0,0,0}, -#endif -#if XCHAL_NUM_INTLEVELS >= 4 /* set for default soft priority of 4: */ - {0,0,0,0, XCHAL_INTLEVEL4_MASK,0,0,0, 0,0,0,0, 0,0,0,0}, -#endif -#if XCHAL_NUM_INTLEVELS >= 5 /* set for default soft priority of 4: */ - {0,0,0,0, XCHAL_INTLEVEL5_MASK,0,0,0, 0,0,0,0, 0,0,0,0}, -#endif -#if XCHAL_NUM_INTLEVELS >= 6 /* set for default soft priority of 4: */ - {0,0,0,0, XCHAL_INTLEVEL6_MASK,0,0,0, 0,0,0,0, 0,0,0,0}, -#endif -#if XCHAL_NUM_INTLEVELS >= 7 /* set for default soft priority of 4: */ -# error Interrupt levels greater than 6 not currently supported in the HAL interrupt routines. -#endif - } - -}; - - -/* - * Virtual (software) priority (0x00..0xFF) of each interrupt. - * This isn't referenced by assembler. - */ -unsigned char Xthal_int_vpri[32] = { -#define DEFAULT_INTVPRI(level) (level ? 
((level << 4) | XTHAL_DEFAULT_SOFTPRI) : 0) - DEFAULT_INTVPRI( XCHAL_INT0_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT1_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT2_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT3_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT4_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT5_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT6_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT7_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT8_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT9_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT10_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT11_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT12_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT13_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT14_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT15_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT16_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT17_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT18_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT19_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT20_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT21_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT22_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT23_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT24_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT25_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT26_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT27_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT28_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT29_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT30_LEVEL ), - DEFAULT_INTVPRI( XCHAL_INT31_LEVEL ) -}; - - -#if 0 -/* - * A number of things may have already been written not calling - * this function, so it isn't straightforward to start requiring it: - */ -void xthal_vpri_init( int default_vpri ) -{ - int i, j; - - Xthal_vpri_level = 0; /* vpri */ - Xthal_vpri_locklevel = 1; /* locklevel */ - Xthal_vpri_lockvpri = 0x1F; /* lockvpri */ - Xthal_vpri_enabled = 0x00000000; /* enabled */ - Xthal_vpri_lockmask = 0x00000000; /* lockmask (unused?) */ - for( i = 0; i < XCHAL_NUM_INTLEVELS; i++ ) { - for( j = 0; j < 16; j++ ) - Xthal_vpri_enablemap[i][j] = XCHAL_INTLEVEL15_ANDBELOW_MASK - & ~Xthal_intlevel_andbelow_mask[i - (j < default_vpri && i > 0)]; - } - for( i = 1; i < XCHAL_NUM_INTLEVELS; i++ ) { - for( j = 0; j < 16; j++ ) - Xthal_vpri_resolvemap[i-1][j] = 0; - if( (default_vpri & 1) != 0 ) - Xthal_vpri_resolvemap[i-1][default_vpri & 0xF] |= Xthal_intlevel_mask[i]; - if( (default_vpri & 2) != 0 ) - Xthal_vpri_resolvemap[i-1][default_vpri & 0xE] |= Xthal_intlevel_mask[i]; - if( (default_vpri & 4) != 0 ) - Xthal_vpri_resolvemap[i-1][default_vpri & 0xC] |= Xthal_intlevel_mask[i]; - if( (default_vpri & 8) != 0 ) - Xthal_vpri_resolvemap[i-1][default_vpri & 0x8] |= Xthal_intlevel_mask[i]; - } - for( i = 0; i < 32; i++ ) - Xthal_int_vpri[i] = (Xthal_intlevel[i] << 4) | (default_vpri & 0xF); -} -#endif /*0*/ - -void xthal_null_func(void) { } -XtHalVoidFunc *Xthal_tram_trigger_fn = xthal_null_func; - - -#endif /* XCHAL_HAVE_INTERRUPTS */ - -#endif - -#if defined(__SPLIT__vpri_to_intlevel) - -/* - * xthal_vpri_to_intlevel - * - * Converts a virtual interrupt priority to the closest equivalent - * (equal or higher) interrupt level. - */ -unsigned xthal_vpri_to_intlevel(unsigned vpri) -{ -#if XCHAL_HAVE_INTERRUPTS - return( XTHAL_VPRI_INTLEVEL( vpri ) ); -#else - return( vpri ); -#endif -} - -#endif - -#if defined(__SPLIT__intlevel_to_vpri) - -/* - * xthal_intlevel_to_vpri - * - * Converts an interrupt level to a virtual interrupt priority. 
- */ -unsigned xthal_intlevel_to_vpri(unsigned intlevel) -{ -#if XCHAL_HAVE_INTERRUPTS - return( XTHAL_VPRI( intlevel, 0xF ) ); -#else - return( intlevel ); -#endif -} - -#endif - -#if defined(__SPLIT__vpri_int_enable) - -/* - * xthal_int_enable - * - * Enables given set of interrupts, and returns previous enabled-state of these interrupts. - */ -unsigned xthal_int_enable(unsigned mask) -{ -#if XCHAL_HAVE_INTERRUPTS - unsigned prev_enabled, syncmask; - - xthal_vpri_lock(); - prev_enabled = Xthal_vpri_enabled | Xthal_tram_enabled; - - /* Figure out which bits must go in Xthal_tram_enabled: */ - syncmask = (mask & Xthal_tram_pending & Xthal_tram_sync); - if( syncmask != 0 ) { - Xthal_tram_enabled |= syncmask; - mask &= ~syncmask; - /* - * If we are re-enabling a pending trampolined interrupt, - * there is a possibility that the level-1 software interrupt - * is no longer pending, having already occurred (without processing - * the trampoline because it was disabled). So we have to - * ensure that the level-1 software interrupt used for trampolining - * is pending. - * We let the BSP do this rather than the HAL, because it could - * potentially use an external level-1 interrupt to trampoline - * (if proper hardware was available) rather than a software interrupt. - */ - (*Xthal_tram_trigger_fn)(); - } - /* The rest go in the global enabled mask: */ - Xthal_vpri_enabled |= mask; - - xthal_vpri_unlock(); /* update INTENABLE as per current vpri */ - return( prev_enabled ); - -#else /* XCHAL_HAVE_INTERRUPTS */ - return( 0 ); -#endif /* XCHAL_HAVE_INTERRUPTS */ -} - -#endif - -#if defined(__SPLIT__vpri_int_disable) - -/* - * xthal_int_disable - * - * Disables given set of interrupts, and returns previous enabled-state of these interrupts. - */ -unsigned xthal_int_disable(unsigned mask) -{ -#if XCHAL_HAVE_INTERRUPTS - unsigned prev_enabled; - - xthal_vpri_lock(); - prev_enabled = Xthal_vpri_enabled | Xthal_tram_enabled; - Xthal_vpri_enabled &= ~mask; - Xthal_tram_enabled &= ~mask; - xthal_vpri_unlock(); /* update INTENABLE as per current vpri */ - return( prev_enabled ); -#else - return( 0 ); -#endif -} - -#endif - -#if defined(__SPLIT__set_vpri_locklevel) - -void xthal_set_vpri_locklevel(unsigned intlevel) -{ -#if XCHAL_HAVE_INTERRUPTS - if( intlevel < 1 ) - intlevel = 1; - else if( intlevel > XCHAL_NUM_INTLEVELS ) - intlevel = XCHAL_NUM_INTLEVELS; - Xthal_vpri_state.locklevel = intlevel; - Xthal_vpri_state.lockvpri = XTHAL_VPRI(intlevel, 15); -#endif -} - -#endif - -#if defined(__SPLIT__get_vpri_locklevel) - -unsigned xthal_get_vpri_locklevel(void) -{ -#if XCHAL_HAVE_INTERRUPTS - return( Xthal_vpri_state.locklevel ); -#else - return( 1 ); /* must return at least 1, some OSes assume this */ -#endif -} - -#endif - -#if defined(__SPLIT__set_int_vpri) - -/* - * xthal_set_int_vpri (was intSetL1Pri) - * - * Set the virtual (software) priority of an interrupt. - * Note: the intlevel of an interrupt CANNOT be changed -- this is - * set in hardware according to the core configuration file. 
- * - * intnum interrupt number (0..31) - * vpri virtual interrupt priority (0..15, or intlevel*16+(0..15) ) - */ -int xthal_set_int_vpri(int intnum, int vpri) -{ -#if XCHAL_HAVE_INTERRUPTS - unsigned mask, maskoff, basepri, prevpri, intlevel, *maskp, i; - - /* - * Verify parameters: - */ - if( (unsigned)intnum >= XCHAL_NUM_INTERRUPTS || (unsigned)vpri > 0xFF ) - return( 0 ); /* error: bad parameter(s) */ - /* - * If requested priority specifies an intlevel, it must match that - * of the interrupt specified; otherwise (0..15) the proper intlevel of - * the specified interrupt is assumed, and added to the parameter: - */ - intlevel = Xthal_intlevel[intnum]; /* intnum's intlevel */ - if( intlevel == 0 || intlevel > XCHAL_NUM_INTLEVELS ) - return( 0 ); /* error: no support for setting priority of NMI etc. */ - basepri = intlevel << 4; /* intnum's base soft-pri. */ - if( vpri > 0x0F ) { /* intlevel portion given? */ - if( (vpri & 0xF0) != basepri ) /* then it must be correct */ - return( 0 ); /* error: intlevel mismatch */ - vpri &= 0x0F; /* remove it */ - } - - mask = 1L << intnum; - - /* - * Lock interrupts during virtual priority data structure updates: - */ - xthal_vpri_lock(); - - /* - * Update virtual priority of 'intnum': - */ - prevpri = Xthal_int_vpri[intnum]; /* save for return value */ - Xthal_int_vpri[intnum] = basepri | vpri; - /* This interrupt must only be enabled at virtual priorities lower than its own: */ - for( i = 0; i < vpri; i++ ) - Xthal_vpri_enablemap[0][basepri++] |= mask; - maskoff = ~mask; - for( ; i <= 0x0F; i++ ) - Xthal_vpri_enablemap[0][basepri++] &= maskoff; - - /* - * Update the prioritization table used to resolve priorities by binary search: - */ - /* Remove interrupt from prioritization table: */ - maskp = Xthal_vpri_resolvemap[intlevel-1]; - for (i=0; i<16; i++) - maskp[i] &= maskoff; - /* Add interrupt to prioritization table at its (new) given priority: */ - if( vpri & 0x1 ) - maskp[vpri] |= mask; - if( vpri & 0x2 ) - maskp[vpri & 0xE] |= mask; - if( vpri & 0x4 ) - maskp[vpri & 0xC] |= mask; - if( vpri & 0x8 ) - maskp[vpri & 0x8] |= mask; - - /* - * Unlock interrupts (back to current level) and update INTENABLE: - */ - xthal_vpri_unlock(); - - return( prevpri ); -#else /* XCHAL_HAVE_INTERRUPTS */ - return( 0 ); -#endif /* XCHAL_HAVE_INTERRUPTS */ -} /* xthal_set_int_vpri */ - -#endif - -#if defined(__SPLIT__get_int_vpri) - -int xthal_get_int_vpri(int intnum) -{ -#if XCHAL_HAVE_INTERRUPTS - if( (unsigned)intnum >= XCHAL_NUM_INTERRUPTS ) - return( 0 ); /* error: bad parameter */ - return( Xthal_int_vpri[intnum] ); -#else - return( 0 ); -#endif -} - - -#endif - -#if defined(__SPLIT__trampolines) - - - /* - SUPPORT FOR TRAMPOLINES - - NOTE: trampolining is a special case. - There are two ways (defined here) to trampoline down - from a high-level interrupt to a level-one interrupt. - - a) Synchronous (restrained) trampolining. - Trampolining without clearing the high-level interrupt, - letting the level-one interrupt handler clear the - source of the interrupt. - Here the high-level interrupt must be kept disabled - while trampolining down, and re-enabled after the - level-one interrupt handler completes. - This is what one might do to "convert" a high-level - interrupt into a level-one interrupt. - The high-level interrupt handler code can be generic. - [One could argue this type of trampolining isn't required, - which may? be true...] - b) Asynchronous (free) trampolining. 
- Trampolining when clearing the high-level interrupt - right away in the high-level interrupt handler. - Here the high-level interrupt is allowed to remain - enabled while trampolining occurs. This is very - useful when some processing must occur with low - latency, but the rest of the processing can occur - at lower (eg. level-one) priority. It is particularly - useful when the lower-priority processing occurs - for only some of the high-level interrupts. - Of course this requires custom assembler code to - handle the high-level interrupt and clear the source - of the interrupt, so the high-level interrupt handler - cannot be generic (as opposed to synchronous trampolining). - - In both cases, a level-one software interrupt is used - for trampolining (one could also trampoline from level - m to n, m > n, n > 1, but that isn't nearly as useful; - it's generally the ability to execute C code and - to process exceptions that is sought after). - - Default trampolining support is currently implemented as follows. - - Trampoline handler: - - A high-level interrupt is considered enabled if *either* - its INTENABLE bit or its xt_tram_ints bit is set - (note that both should never be set at the same time). - - */ - - -/* These are described in xtensa/hal.h (assumed initialized to zero, in BSS): */ -unsigned Xthal_tram_pending; -unsigned Xthal_tram_enabled; -unsigned Xthal_tram_sync; - - - -XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn ) -{ -#if XCHAL_HAVE_INTERRUPTS - XtHalVoidFunc *fn; - - fn = Xthal_tram_trigger_fn; - Xthal_tram_trigger_fn = trigger_fn; - return( fn ); -#else - (void)trigger_fn; - return( 0 ); -#endif -} - - -/* - * xthal_tram_set_sync - * - * Configure type of trampoline for a high-level interrupt. - * By default any trampoline is asynchronous, this need only - * be called to tell the Core HAL that a high-level interrupt - * will be using synchronous trampolining (down to a level-1 interrupt). - * - * intnum interrupt number (0 .. 31) - * sync 0 = async, 1 = synchronous - * - * Returns previous sync state of interrupt (0 or 1) - * or -1 if invalid interrupt number provided. - */ -int xthal_tram_set_sync( int intnum, int sync ) -{ -#if XCHAL_HAVE_INTERRUPTS - unsigned mask; - int prev; - - if( (unsigned)intnum >= XCHAL_NUM_INTERRUPTS ) - return( -1 ); - mask = 1L << intnum; - prev = ((Xthal_tram_sync & mask) != 0); - if( sync ) - Xthal_tram_sync |= mask; - else - Xthal_tram_sync &= ~mask; - return( prev ); -#else /* XCHAL_HAVE_INTERRUPTS */ - return( 0 ); -#endif /* XCHAL_HAVE_INTERRUPTS */ -} - - -/* - * xthal_tram_pending_to_service - * - * This is called by the trampoline interrupt handler - * (eg. by a level-one software interrupt handler) - * to obtain the bitmask of high-level interrupts - * that it must service. - * Returns that bitmask (note: this can sometimes be zero, - * eg. if currently executing level-one code disables the high-level - * interrupt before the trampoline handler has a chance to run). - * - * This call automatically clears the trampoline pending - * bits for the interrupts in the returned mask. - * So the caller *must* process all interrupts that have - * a corresponding bit set if the value returned by this function - * (otherwise those interrupts may likely be lost). - * - * This function should be called with level-one interrupts disabled - * (via INTENABLE; can't be via PS.INTLEVEL because this is C code). 
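 *
 * A minimal, purely illustrative sketch of such a trampoline handler
 * (service_one_interrupt() is a hypothetical helper, not part of the HAL):
 *
 *     void level1_sw_interrupt_handler(void)
 *     {
 *         unsigned mask = xthal_tram_pending_to_service();
 *         while (mask != 0) {
 *             unsigned bit = mask & -mask;   // isolate one pending interrupt
 *             service_one_interrupt(bit);    // hypothetical per-interrupt work
 *             xthal_tram_done(bit);          // re-enables it if synchronously trampolined
 *             mask &= ~bit;
 *         }
 *     }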
- */ -unsigned xthal_tram_pending_to_service( void ) -{ -#if XCHAL_HAVE_INTERRUPTS - unsigned service_mask; - - service_mask = ( Xthal_tram_pending - & (Xthal_vpri_enabled | Xthal_tram_enabled) ) ; - - /* - * Clear trampoline pending bits. - * Each bit must be cleared *before* processing of the corresponding - * interrupt occurs, to avoid missing interrupts. - * Here we just clear all bits for simplicity and convenience. - */ - Xthal_tram_pending &= ~service_mask; - - return( service_mask ); -#else /* XCHAL_HAVE_INTERRUPTS */ - return( 0 ); -#endif /* XCHAL_HAVE_INTERRUPTS */ -} - -/* - * xthal_tram_done - * - * This is called by the trampoline interrupt handler - * (eg. by a level-one software interrupt handler) - * to indicate that processing of a trampolined interrupt - * (eg. one or more of the bits it received from - * xthal_tram_acknowledge()) has completed. - * - * For asynchronously trampolined interrupt(s), there is nothing to do. - * For synchronously trampolined interrupt(s), the high-level - * interrupt(s) must be re-enabled (presumably the level-one - * interrupt handler that just completed has cleared the source - * of the high-level interrupt). - * - * This function should be called with level-one interrupts disabled - * (via INTENABLE; can't be via PS.INTLEVEL because this is C code). - */ -void xthal_tram_done( unsigned serviced_mask ) -{ -#if XCHAL_HAVE_INTERRUPTS - serviced_mask &= Xthal_tram_enabled; /* sync. trampolined interrupts that completed */ - Xthal_tram_enabled &= ~serviced_mask; - xthal_int_enable( serviced_mask ); -#endif -} - -#endif - -#if defined(__SPLIT__deprecated) - - -/**********************************************************************/ - -#ifdef INCLUDE_DEPRECATED_HAL_CODE -/* These definitions were present in an early beta version of the HAL and should not be used: */ -const unsigned Xthal_num_int_levels = XCHAL_NUM_INTLEVELS; -const unsigned Xthal_num_ints = XCHAL_NUM_INTERRUPTS; -__asm__(".global Xthal_int_level_mask\n" ".set Xthal_int_level_mask, Xthal_intlevel_mask+4"); -__asm__(".global Xthal_int_level1_to_n_mask\n" ".set Xthal_int_level1_to_n_mask, Xthal_intlevel_andbelow_mask+8"); -/*const unsigned Xthal_int_level_mask[15] = { XCHAL_INTLEVEL_MASKS }; ... minus the first entry ...*/ -/*const unsigned Xthal_int_level1_to_n_mask[14] = { XCHAL_INTLEVEL_ANDBELOW_MASKS }; ... minus the first two entries ...*/ -const unsigned Xthal_int_level[32] = { XCHAL_INT_LEVELS }; -const unsigned Xthal_int_type_edge = XCHAL_INTTYPE_MASK_EXTERN_EDGE; -const unsigned Xthal_int_type_level = XCHAL_INTTYPE_MASK_EXTERN_LEVEL; -const unsigned Xthal_int_type_timer = XCHAL_INTTYPE_MASK_TIMER; -const unsigned Xthal_int_type_software = XCHAL_INTTYPE_MASK_SOFTWARE; -#endif /* INCLUDE_DEPRECATED_HAL_CODE */ - - -#endif /* SPLITs */ - diff --git a/src/arch/xtensa/hal/mem_ecc_parity.S b/src/arch/xtensa/hal/mem_ecc_parity.S deleted file mode 100644 index 2a85a3ecb37b..000000000000 --- a/src/arch/xtensa/hal/mem_ecc_parity.S +++ /dev/null @@ -1,285 +0,0 @@ -// -// mem_ecc_parity.S - utility routines for the local memory ECC/parity option -// (memory error checking and exceptions) -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/mem_ecc_parity.S#1 $ - -// Copyright (c) 2006-2010 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - -/* - * For most functions, the link-time HAL defines two entry points: - * xthal_...() and xthal_..._nw(). The former is the main entry point - * invoked from C code, or assembly code that follows the C ABI. - * The latter is for use in assembly code that cannot easily follow - * all the requirements of the windowed ABI, e.g. in exception handlers; - * these use the call0 ABI instead (in most cases; some use their own conventions). - * - * When software tools are configured to use the call0 ABI, both variants - * are identical (with some exceptions as noted). To avoid duplicating - * code, we define both labels for the same function body. The Makefile - * defines __SPLIT__..._nw macros with windowed ABI but not with Call0 ABI. - * Use SYM_NW() for the _nw variants defined with the __SPLIT_..._nw macros, - * i.e. for call0 ABI variants when windowed ABI is in use; these are not - * C callable so SYM_NW() does not specify .type information. - * Use SYMBOL() otherwise, which defines both symbols if call0 ABI is selected. 
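 *
 * For example (illustration only): with the call0 ABI, the SYMBOL()
 * definition below makes
 *
 *     SYMBOL(xthal_memep_inject_error)
 *
 * emit both the xthal_memep_inject_error and xthal_memep_inject_error_nw
 * labels in front of a single function body; with the windowed ABI it
 * emits only xthal_memep_inject_error, and any separate _nw variant would
 * be introduced with SYM_NW() instead.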
- */ - -#if defined (__XTENSA_CALL0_ABI__) -# define SYMBOL(x) .global x ; .type x,@function ; \ - .global x ## _nw ; .type x ## _nw,@function ; \ - .align 4 ; x: ; x ## _nw: -#else -# define SYMBOL(x) .global x ; .type x,@function ; .align 4 ; x: -#endif -#define SYM_NW(x) .global x ; .align 4 ; x: - - -/* Compute smaller of I and D cache line sizes: */ -#if XCHAL_ICACHE_LINEWIDTH < XCHAL_DCACHE_LINEWIDTH && XCHAL_ICACHE_SIZE > 0 -# define CACHE_LINEWIDTH_MIN XCHAL_ICACHE_LINEWIDTH -# define CACHE_LINESIZE_MIN XCHAL_ICACHE_LINESIZE -#else -# define CACHE_LINEWIDTH_MIN XCHAL_DCACHE_LINEWIDTH -# define CACHE_LINESIZE_MIN XCHAL_DCACHE_LINESIZE -#endif - - - .text - -//------------------------------------------------------------------------ -// Inject errors into instruction and/or data RAMs, or cache data or tags -//------------------------------------------------------------------------ - -#if defined(__SPLIT__memep_inject_error) - -// void xthal_memep_inject_error(void *addr, int size, int flags); -// where: -// addr (a2) pointer to local memory, or cache address -// size (a3) size in bytes (gets aligned to words or lines) -// flags (a4) is a combination of the following bits: -// bit 31-5: (reserved) -// bit 4: 0 = inject non-correctable error, -// 16 = inject correctable error (if ECC) -// bit 3: (reserved) -// bit 2: 0 = local memory, 4 = cache -// bit 1: 0 = data cache, 2 = instruction cache -// bit 0: 0 = cache data, 1 = cache tag -// -// (note: data cache data is handled same as local memories; -// to access specific dcache data entries, you have to setup -// a region or page in cache-isolate mode yourself) - -SYMBOL(xthal_memep_inject_error) - abi_entry - -#if XCHAL_HAVE_MEM_ECC_PARITY - - // These MOVIs may be L32Rs, load them before enabling test mode: - movi a6, 0x02020202 // XOR'ing this creates a correctable error - bbsi.l a4, 4, 1f // branch if correctable error requested - movi a6, 0x03030303 // XOR'ing this creates a non-correctable error -1: - - // Lock out all interrupts, to avoid interrupt handlers running with - // test mode enabled (corrupting their stores, likely leading to - // non-correctable memory errors). - // - // If NMI is possible, you're toast - // (no stores during NMI handler will have properly computed ECC/parity bits) - // although you might make the NMI handler check MESR.ERRTEST and save/clear - // it if it's set on entry, so that its stores work correctly. - // - // If memory exceptions are possible, might be okay as long as the - // handler checks whether test mode is on, and turns it off temporarily - // to do its work. - // -# if XCHAL_HAVE_INTERRUPTS - rsil a11, 15 -# endif - - // Save current MESR and set test mode: - - rsr.mesr a8 - bbsi.l a8, MESR_ERRTEST_SHIFT, .Lproceed // already in test mode? 
- addmi a9, a8, MESR_ERRTEST // enable test mode - bbci.l a8, MESR_ERRENAB_SHIFT, 1f - addmi a9, a9, - MESR_ERRENAB // disable error checks -1: xsr.mesr a9 - beq a8, a9, .Lproceed // clean update, continue - bbci.l a9, MESR_RCE_SHIFT, .Lproceed // we likely restored a lost RCE, just keep it - // At this point, either we: - // a) cleared an RCE record that got created between RSR and XSR - // b) cleared LCE bits that got set between RSR and XSR - // c) more eclectic, and presumably much less likely, cases of - // RCE/LCE bits being cleared and set again between RSR and XSR - // due to multiple memory errors and memory error exceptions - // in that period; for now, we ignore this possibility - // (decreasing returns on addressing these arbitrarily complex cases) - // Assuming (a) or (b), restore the bits we took away. - //addmi a8, a8, MESR_ERRTEST - addmi a9, a9, MESR_ERRTEST - bbci.l a9, MESR_ERRENAB_SHIFT, 1f - addmi a9, a9, - MESR_ERRENAB // disable error checks -1: wsr.mesr a9 - //xsr.mesr a9 - //beq a8, a9, .Lproceed // updated fine, continue - // - // Above we could have used XSR instead of WSR. - // However, it's not clear at this point what's the cleanest thing - // to do if what we read back doesn't match what we expected, - // because at that point we have multiple errors to deal with. - // Unless we have code here to handle (fix and/or log) these errors, - // we have to chuck something away or write a bunch more code to - // handle another LCE bit getting set etc (also starting to be - // a low probability occurrence). -.Lproceed: - // Test mode enabled. From this point until we restore MESR, - // the only loads and stores done are for injecting errors. - -# if XCHAL_ICACHE_SIZE || XCHAL_DCACHE_SIZE - bbci.l a4, 2, .L_inject_local // branch if injecting to local memory - bbsi.l a4, 1, .L_inject_icache // branch if injecting to icache - // Inject errors in dcache: - bbci.l a4, 0, .L_inject_local // branch if injecting to dcache data -# if XCHAL_DCACHE_SIZE && XCHAL_HAVE_DCACHE_TEST - // Inject errors in dcache tags: - - // Round addr/size to fully rather than partially cover - // all aligned cache lines: - extui a9, a2, 0, XCHAL_DCACHE_LINEWIDTH - sub a2, a2, a9 - add a3, a3, a9 - addi a3, a3, XCHAL_DCACHE_LINESIZE-1 - srli a3, a3, XCHAL_DCACHE_LINEWIDTH // size in cache lines - - floopgtz a3, .Ldctagloop - ldct a9, a2 // load dcache line tag - rsr.mecr a7 // get check bits - xor a7, a7, a6 // ECC: single-bit error; Parity: NO-OP - wsr.mecr a7 // setup modified check bits - sdct a9, a2 // store tag with modified check bits - addi a2, a2, XCHAL_DCACHE_LINESIZE // increment to next line - floopend a3, .Ldctagloop -# endif /* have dcache */ - j .L_inject_done - - // Inject errors in icache: -.L_inject_icache: -# if XCHAL_ICACHE_SIZE && XCHAL_HAVE_ICACHE_TEST - bbci.l a4, 0, .L_inject_icw // branch if injecting to icache data - - // Inject errors in icache tags: - // Round addr/size to fully rather than partially cover - // all aligned cache lines: - extui a9, a2, 0, XCHAL_ICACHE_LINEWIDTH - sub a2, a2, a9 - add a3, a3, a9 - addi a3, a3, XCHAL_ICACHE_LINESIZE-1 - srli a3, a3, XCHAL_ICACHE_LINEWIDTH // size in cache lines - - floopgtz a3, .Lictagloop - lict a9, a2 // load icache line tag - rsr.mecr a7 // get check bits - xor a7, a7, a6 // ECC: single-bit error; Parity: NO-OP - wsr.mecr a7 // setup modified check bits - sict a9, a2 // store tag with modified check bits - addi a2, a2, XCHAL_ICACHE_LINESIZE // increment to next line - floopend a3, .Lictagloop - j .L_inject_done - 
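	// A purely illustrative C-level use of this routine (the buffer name and
	// its placement in a local DataRAM are assumptions): inject a correctable
	// error into a small word-aligned buffer so a memory-error handler can be
	// exercised:
	//
	//     static int scratch[16];
	//     xthal_memep_inject_error(scratch, sizeof(scratch), 16);  // 16 = correctable (ECC), local memory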
-.L_inject_icw: -# if XCHAL_ICACHE_ACCESS_SIZE <= 4 /* SICW does not work usefully (replicates data) if accessWidth > 32 bits */ - // Inject errors in icache data words: - // Round addr/size to fully rather than partially cover - // all aligned 32-bit words: - extui a9, a2, 0, 2 - sub a2, a2, a9 - add a3, a3, a9 - addi a3, a3, 3 - srli a3, a3, 2 // size in words - - floopgtz a3, .Licwloop - licw a9, a2 // load word of icache line data - rsr.mecr a7 // get check bits - xor a7, a7, a6 // ECC: single-bit error; Parity: NO-OP - wsr.mecr a7 // setup modified check bits - sicw a9, a2 // store data with modified check bits - addi a2, a2, 4 // increment to next word - floopend a3, .Licwloop -# endif -# endif /* have icache */ - j .L_inject_done -# endif /* have icache or dcache */ - -.L_inject_local: - // Round addr/size to fully rather than partially cover - // all aligned 32-bit words: - extui a9, a2, 0, 2 - sub a2, a2, a9 - add a3, a3, a9 - addi a3, a3, 3 - srli a3, a3, 2 // size in words - - floopgtz a3, .Lendloop - l32i a9, a2, 0 // load data - rsr.mecr a7 // get check bits - xor a7, a7, a6 // ECC: single-bit error; Parity: NO-OP - wsr.mecr a7 // setup modified check bits - s32i a9, a2, 0 // store data with modified check bits - addi a2, a2, 4 // increment to next word - floopend a3, .Lendloop - -.L_inject_done: - // Restore MESR (a8 is the saved original MESR): - bbsi.l a8, MESR_ERRTEST_SHIFT, 2f // was already in test mode - rsr.mesr a6 - addmi a9, a6, - MESR_ERRTEST // disable test mode - bbci.l a8, MESR_ERRENAB_SHIFT, 1f - addmi a9, a9, MESR_ERRENAB // enable error checks -1: xsr.mesr a9 - beq a6, a9, 2f // clean update, done - bbci.l a9, MESR_RCE_SHIFT, 2f // we likely restored a lost RCE, just keep it - addmi a9, a9, - MESR_ERRTEST - bbci.l a8, MESR_ERRENAB_SHIFT, 1f - addmi a9, a9, MESR_ERRENAB // disable error checks -1: wsr.mesr a9 -2: - - // Restore PS.INTLEVEL: -# if XCHAL_HAVE_INTERRUPTS - wsr.ps a11 - rsync -# endif -#endif /* XCHAL_HAVE_MEM_ECC_PARITY */ - - abi_return - - .size xthal_memep_inject_error, . - xthal_memep_inject_error - - -#endif /*split*/ - -//---------------------------------------------------------------------- - diff --git a/src/arch/xtensa/hal/memcopy.S b/src/arch/xtensa/hal/memcopy.S deleted file mode 100644 index 5cabcf166d9b..000000000000 --- a/src/arch/xtensa/hal/memcopy.S +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Core HAL library functions xthal_memcpy and xthal_bcopy - */ - -/* - * Copyright (c) 2003, 2006, 2010 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - - -#ifdef __XTENSA_EB__ -# define BL(b,l) b -#else -# define BL(b,l) l -#endif - - .macro srcel r, early, late // combine early and late words, shift into \r - src \r, BL(\early,\late), BL(\late,\early) - .endm - - .macro ssa8f r // set shift-amount for shift *from* given 2-bit alignment - BL(ssa8b,ssa8l) \r - .endm - - .macro ssa8t r // set shift-amount for shift *to* given 2-bit alignment - BL(ssa8l,ssa8b) \r // (reverse of ssa8f) - .endm - - .macro s2ll r, s // shift-to-later logical (away from zero-addressed byte) - BL(srl,sll) \r, \s - .endm - - .macro s2el r, s // shift-to-early logical (towards zero-addressed byte) - BL(sll,srl) \r, \s - .endm - -/* - * void *xthal_memcpy(void *dst, const void *src, size_t len); - * void *xthal_bcopy(const void *src, void *dst, size_t len); - * - * This function is intended to do the same thing as the standard - * library function memcpy() (or bcopy()) for most cases. - * However, it uses strictly 32-bit load and store instructions - * to copy data. This ensures this function will work - * where the source and/or destination references an - * instruction RAM or ROM, which can only be accessed - * using l32i (IRAM+IROM) and s32i (IRAM). - * - * The bcopy version is provided here to avoid the overhead - * of an extra call, for callers that require this convention. - * - * The (general case) algorithm is as follows: - * If destination is unaligned, align it by copying 1 to 3 bytes. - * If source is aligned, - * do 16 bytes with a loop, and then finish up with - * 8, 4, and 0-3 byte copies conditional on the length; - * else (if source is unaligned), - * do the same, but use SRC to align the source data. - * This code tries to use fall-through branches for the common - * case of aligned source and destination and multiple - * of 4 length. - * - * Register use: - * a0/ return address - * a1/ stack pointer - * a2/ return value - * a3/ src - * a4/ length - * a5/ dst - * a6/ tmp - * a7/ tmp - * a8/ tmp - * a9/ tmp - * a10/ tmp - * a11/ tmp - * a12/ tmp - */ - -/* xthal_bcopy and xthal_memcpy need to allocate the same stack size - * on entry since they share the same function-return code. Also, - * there is more than one return point. */ - -#define SAVE_A0 0 -#define SAVE_A3 4 -#define SAVE_A4 8 -#define SAVE_A5 12 -#define SAVE_A12 16 -#define STKSIZE 32 - - - .text - .align 4 - .global xthal_bcopy - .type xthal_bcopy,@function -xthal_bcopy: -#ifdef __XTENSA_CALL0_ABI__ - addi sp, sp, -STKSIZE - s32i a12, a1, SAVE_A12 -#else - entry sp, 32 // allow for call8 below -#endif - // a2=src, a3=dst, a4=len - mov a5, a3 // copy dst so that a2 is return value - mov a3, a2 - mov a2, a5 - j .Lcommon // go to common code for memcpy+bcopy - - .size xthal_bcopy, . 
- xthal_bcopy - - - -/* - * Destination is unaligned - */ - - .align 4 -xthal_memcpy.prefixcode: // purely for purpose of .size -.Ldstunaligned: - mov a10, a5 - mov a11, a3 - movi a12, 4 - sub a6, a12, a6 // number of bytes to copy for dst alignment - mov a12, a6 -#ifdef __XTENSA_CALL0_ABI__ - s32i a0, a1, SAVE_A0 // preserve live registers - s32i a3, a1, SAVE_A3 - s32i a4, a1, SAVE_A4 - s32i a5, a1, SAVE_A5 - call0 xthal_copy123 - l32i a0, a1, SAVE_A0 // restore live registers - l32i a3, a1, SAVE_A3 - l32i a4, a1, SAVE_A4 - l32i a5, a1, SAVE_A5 - mov a6, a12 // restore a6 from callee-saved register -#else - call8 xthal_copy123 -#endif - add a5, a5, a6 - add a3, a3, a6 - sub a4, a4, a6 - j .Ldstaligned - - // Not sure how else to count code that precedes a function, in .size: - .size xthal_memcpy.prefixcode, . - xthal_memcpy.prefixcode - - - .align 4 - .global xthal_memcpy - .type xthal_memcpy,@function -xthal_memcpy: -#ifdef __XTENSA_CALL0_ABI__ - addi sp, sp, -STKSIZE - s32i a12, a1, SAVE_A12 -#else - entry sp, 32 // allow for call8 below -#endif - // a2=dst, a3=src, a4=len - mov a5, a2 // copy dst so that a2 is return value -.Lcommon: -#ifdef __XTENSA_CALL0_ABI__ - /* - * have to restore the stack - */ - _bgeui a4, 4, 1f - mov a12, a0 // preserve return address - call0 xthal_copy123 - mov a0, a12 // restore return address - l32i a12, a1, SAVE_A12 - addi sp, sp, STKSIZE - ret -1: -#else - bltui a4, 4, xthal_copy123_pastentry // NOTE: sometimes relaxes -#endif - - extui a6, a2, 0, 2 // destination unalignment offset - bnez a6, .Ldstunaligned // align the destination -.Ldstaligned: // return here once dst is aligned - srli a7, a4, 4 // number of loop iterations of 16-bytes each - extui a11, a3, 0, 2 // source unalignment offset - _bnez a11, .Lsrcunaligned // if source not aligned, use shifting copy - /* - * Destination and source are 32-bit aligned, use 32-bit copy. - */ -#if XCHAL_HAVE_LOOPS - loopnez a7, .Loop1done -#else /* !XCHAL_HAVE_LOOPS */ - beqz a7, .Loop1done - slli a8, a7, 4 - add a8, a8, a3 // a8 = end of last 16B source chunk -#endif /* !XCHAL_HAVE_LOOPS */ -.Loop1: - l32i a6, a3, 0 - l32i a7, a3, 4 - s32i a6, a5, 0 - l32i a6, a3, 8 - s32i a7, a5, 4 - l32i a7, a3, 12 - s32i a6, a5, 8 - addi a3, a3, 16 - s32i a7, a5, 12 - addi a5, a5, 16 -#if !XCHAL_HAVE_LOOPS - blt a3, a8, .Loop1 -#endif /* !XCHAL_HAVE_LOOPS */ -.Loop1done: - bbci.l a4, 3, .L2 - // copy 8 bytes - l32i a6, a3, 0 - l32i a7, a3, 4 - addi a3, a3, 8 - s32i a6, a5, 0 - s32i a7, a5, 4 - addi a5, a5, 8 -.L2: - bbci.l a4, 2, .L3 - // copy 4 bytes - l32i a6, a3, 0 - addi a3, a3, 4 - s32i a6, a5, 0 - addi a5, a5, 4 -.L3: - // Copy last 0 to 3 bytes using 32-bit accesses (aligned source and destination): - extui a4, a4, 0, 2 // any bytes to copy? - beqz a4, 1f // if not, skip this to avoid extraneous loads/stores - l32i a6, a3, 0 // get source word - l32i a7, a5, 0 // get destination word - ssa8f a4 // shift from length (end of source) - s2ll a6, a6 // align source to last byte - s2el a7, a7 // align parts of a7 following modified bytes, to early byte - ssa8t a4 // shift to end of modified destination (length) - srcel a7, a6, a7 // combine source with late-dst to form last word - s32i a7, a5, 0 // update last word -1: - -#ifdef __XTENSA_CALL0_ABI__ - l32i a12, a1, SAVE_A12 - addi sp, sp, STKSIZE - ret -#else - retw -#endif - - .size xthal_memcpy, . 
- xthal_memcpy - - - // void xthal_copy123(dst, src, len); - // - // Copy from 0 to 3 bytes, using only 32-bit loads and stores, - // with arbitrarily aligned source and destination. - // - // arg1 = a2 = dst - // arg2 = a3 = src - // arg3 = a4 = len - - .global xthal_copy123 - .type xthal_copy123,@function - .align 4 -xthal_copy123: - abi_entry - -xthal_copy123_pastentry: - _beqz a4, cdone // don't load or store if zero bytes - // First get the bytes: - movi a5, ~3 - and a5, a3, a5 // align src address - l32i a6, a5, 0 - l32i a7, a5, 4 - ssa8f a3 - srcel a3, a6, a7 - // a3 now contains source bytes, aligned to 1st byte (memory order) - // (source address is no longer needed at this point) - - // Does destination span two words?: - extui a10, a2, 0, 2 // destination alignment - sub a5, a2, a10 // align destination address - l32i a8, a5, 0 // get first destination word regardless - add a6, a10, a4 // dst_align + len - ssa8f a2 // shift from dst_align (to 1st or last byte) - s2ll a10, a8 // a10 = first part of destination, aligned to last byte - bltui a6, 4, oneword // branch if destination contained in single word - - // Two-word destination case: - l32i a8, a5, 4 // get second word - ssa8t a2 // shift to dst_align - srcel a10, a10, a3 // with a10 in early bytes, a3 in later bytes - s32i a10, a5, 0 // update first word - addi a5, a5, 4 // advance to last word for common code below - //movi a10, 0 // not needed, gets dropped - -oneword: - // One-word (and two-word) destination case: - // a8 = contents of last destination word - // a10 = early part of a8 preceding modified bytes, shifted towards last byte - // - ssa8f a4 // shift from length (end of source) - srcel a3, a10, a3 // combine early-destination with source, aligned to last byte - - ssa8f a6 // shift from end of modified destination (dst_align+len) - s2el a8, a8 // align parts of a8 following modified bytes, to early byte - ssa8t a6 // shift to end of modified destination (dst_align+len) - srcel a8, a3, a8 // combine early-dst+source with late-dst to form last word - s32i a8, a5, 0 // update last word -cdone: abi_return // return dst - -/* - * Destination is aligned, Source is unaligned - */ - - .align 4 -.Lsrcunaligned: - // Copy 16 bytes per iteration for word-aligned dst and unaligned src - ssa8f a3 // set shift amount from byte offset -#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the - lint or ferret client, or 0 to save a few cycles */ -#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT - extui a11, a3, 0, 2 // save unalignment offset for below - sub a3, a3, a11 // align a3 -#endif - l32i a6, a3, 0 // load first word -#if XCHAL_HAVE_LOOPS - loopnez a7, .Loop2done -#else /* !XCHAL_HAVE_LOOPS */ - beqz a7, .Loop2done - slli a10, a7, 4 - add a10, a10, a3 // a10 = end of last 16B source chunk -#endif /* !XCHAL_HAVE_LOOPS */ -.Loop2: - l32i a7, a3, 4 - l32i a8, a3, 8 - srcel a6, a6, a7 - s32i a6, a5, 0 - l32i a9, a3, 12 - srcel a7, a7, a8 - s32i a7, a5, 4 - l32i a6, a3, 16 - srcel a8, a8, a9 - s32i a8, a5, 8 - addi a3, a3, 16 - srcel a9, a9, a6 - s32i a9, a5, 12 - addi a5, a5, 16 -#if !XCHAL_HAVE_LOOPS - blt a3, a10, .Loop2 -#endif /* !XCHAL_HAVE_LOOPS */ -.Loop2done: - bbci.l a4, 3, .L12 - // copy 8 bytes - l32i a7, a3, 4 - l32i a8, a3, 8 - srcel a6, a6, a7 - s32i a6, a5, 0 - addi a3, a3, 8 - srcel a7, a7, a8 - s32i a7, a5, 4 - addi a5, a5, 8 - mov a6, a8 -.L12: - bbci.l a4, 2, .L13 - // copy 4 bytes - l32i a7, a3, 4 - addi a3, a3, 4 - srcel a6, a6, a7 - s32i a6, a5, 0 - addi a5, a5, 4 - 
mov a6, a7 -.L13: - // Copy last 0 to 3 bytes using 32-bit accesses (shifting source, aligned destination): - //_beqz a4[1:0], cdone // don't load or store if zero bytes - l32i a7, a3, 4 // get source word - l32i a3, a5, 0 // get destination word - srcel a6, a6, a7 // source bytes, aligned to early (1st) byte - ssa8f a4 // shift from length (end of source) - s2ll a6, a6 // combine early-destination with source, aligned to last byte - s2el a3, a3 // align parts of a3 following modified bytes, to early byte - ssa8t a4 // shift to end of modified destination (length) - srcel a3, a6, a3 // combine early-dst+source with late-dst to form last word - s32i a3, a5, 0 // update last word -.Ldone: -#ifdef __XTENSA_CALL0_ABI__ - l32i a12, a1, SAVE_A12 - addi sp, sp, STKSIZE - ret -#else - retw -#endif - - .size xthal_copy123, . - xthal_copy123 - diff --git a/src/arch/xtensa/hal/misc.c b/src/arch/xtensa/hal/misc.c deleted file mode 100644 index 7742851e291f..000000000000 --- a/src/arch/xtensa/hal/misc.c +++ /dev/null @@ -1,178 +0,0 @@ -// -// misc.c - miscellaneous constants -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/misc.c#1 $ - -// Copyright (c) 2004-2005 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
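The constants defined below are plain data; as a purely illustrative example
(the report_hal_version() wrapper and the use of printf are assumptions, not
part of this file), firmware could report the HAL and hardware versions like
this:

    #include <stdio.h>
    #include <xtensa/hal.h>

    void report_hal_version(void)
    {
        printf("Xtensa HAL %u.%u (%s), hw configid %08x:%08x\n",
               Xthal_release_major, Xthal_release_minor, Xthal_release_name,
               Xthal_hw_configid0, Xthal_hw_configid1);
    }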
- -#include - - -// Software release info (not configuration-specific!): -const unsigned int Xthal_release_major = XTHAL_RELEASE_MAJOR; -const unsigned int Xthal_release_minor = XTHAL_RELEASE_MINOR; -const char * const Xthal_release_name = XTHAL_RELEASE_NAME; -#ifdef XTHAL_RELEASE_INTERNAL -const char * const Xthal_release_internal = XTHAL_RELEASE_INTERNAL; -#else -const char * const Xthal_release_internal = 0; -#endif -/* Old format, for backward compatibility: */ -const unsigned int Xthal_rev_no = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV; - -// number of registers in register window, or number of registers if not windowed -const unsigned int Xthal_num_aregs = XCHAL_NUM_AREGS; -const unsigned char Xthal_num_aregs_log2 = XCHAL_NUM_AREGS_LOG2; - -const unsigned char Xthal_memory_order = XCHAL_MEMORY_ORDER; -const unsigned char Xthal_have_windowed = XCHAL_HAVE_WINDOWED; -const unsigned char Xthal_have_density = XCHAL_HAVE_DENSITY; -const unsigned char Xthal_have_booleans = XCHAL_HAVE_BOOLEANS; -const unsigned char Xthal_have_loops = XCHAL_HAVE_LOOPS; -const unsigned char Xthal_have_nsa = XCHAL_HAVE_NSA; -const unsigned char Xthal_have_minmax = XCHAL_HAVE_MINMAX; -const unsigned char Xthal_have_sext = XCHAL_HAVE_SEXT; -const unsigned char Xthal_have_clamps = XCHAL_HAVE_CLAMPS; -const unsigned char Xthal_have_mac16 = XCHAL_HAVE_MAC16; -const unsigned char Xthal_have_mul16 = XCHAL_HAVE_MUL16; -const unsigned char Xthal_have_fp = XCHAL_HAVE_FP; -const unsigned char Xthal_have_speculation = XCHAL_HAVE_SPECULATION; -const unsigned char Xthal_have_exceptions = XCHAL_HAVE_EXCEPTIONS; -const unsigned char Xthal_xea_version = XCHAL_XEA_VERSION; -const unsigned char Xthal_have_interrupts = XCHAL_HAVE_INTERRUPTS; -const unsigned char Xthal_have_highlevel_interrupts = XCHAL_HAVE_HIGHLEVEL_INTERRUPTS; -const unsigned char Xthal_have_nmi = XCHAL_HAVE_NMI; -const unsigned char Xthal_have_prid = XCHAL_HAVE_PRID; -const unsigned char Xthal_have_release_sync = XCHAL_HAVE_RELEASE_SYNC; -const unsigned char Xthal_have_s32c1i = XCHAL_HAVE_S32C1I; -const unsigned char Xthal_have_threadptr = XCHAL_HAVE_THREADPTR; - -const unsigned char Xthal_have_pif = XCHAL_HAVE_PIF; -const unsigned short Xthal_num_writebuffer_entries = XCHAL_NUM_WRITEBUFFER_ENTRIES; - -const unsigned int Xthal_build_unique_id = XCHAL_BUILD_UNIQUE_ID; -// Release info for hardware targeted by software upgrades: -const unsigned int Xthal_hw_configid0 = XCHAL_HW_CONFIGID0; -const unsigned int Xthal_hw_configid1 = XCHAL_HW_CONFIGID1; -const unsigned int Xthal_hw_release_major = XCHAL_HW_VERSION_MAJOR; -const unsigned int Xthal_hw_release_minor = XCHAL_HW_VERSION_MINOR; -const char * const Xthal_hw_release_name = XCHAL_HW_VERSION_NAME; -const unsigned int Xthal_hw_min_version_major = XCHAL_HW_MIN_VERSION_MAJOR; -const unsigned int Xthal_hw_min_version_minor = XCHAL_HW_MIN_VERSION_MINOR; -const unsigned int Xthal_hw_max_version_major = XCHAL_HW_MAX_VERSION_MAJOR; -const unsigned int Xthal_hw_max_version_minor = XCHAL_HW_MAX_VERSION_MINOR; -#ifdef XCHAL_HW_RELEASE_INTERNAL -const char * const Xthal_hw_release_internal = XCHAL_HW_RELEASE_INTERNAL; -#else -const char * const Xthal_hw_release_internal = 0; -#endif - -/* MMU related info... 
*/ - -const unsigned char Xthal_have_spanning_way = XCHAL_HAVE_SPANNING_WAY; -const unsigned char Xthal_have_identity_map = XCHAL_HAVE_IDENTITY_MAP; -const unsigned char Xthal_have_mimic_cacheattr = XCHAL_HAVE_MIMIC_CACHEATTR; -const unsigned char Xthal_have_xlt_cacheattr = XCHAL_HAVE_XLT_CACHEATTR; -const unsigned char Xthal_have_cacheattr = XCHAL_HAVE_CACHEATTR; -const unsigned char Xthal_have_tlbs = XCHAL_HAVE_TLBS; -#if XCHAL_HAVE_MPU -const unsigned char Xthal_mmu_asid_bits = 0; -const unsigned char Xthal_mmu_asid_kernel = 0; -const unsigned char Xthal_mmu_rings = 0; -const unsigned char Xthal_mmu_ring_bits = 0; -const unsigned char Xthal_mmu_sr_bits = 0; -const unsigned char Xthal_mmu_ca_bits = 0; -#else -const unsigned char Xthal_mmu_asid_bits = XCHAL_MMU_ASID_BITS; -const unsigned char Xthal_mmu_asid_kernel = XCHAL_MMU_ASID_KERNEL; -const unsigned char Xthal_mmu_rings = XCHAL_MMU_RINGS; -const unsigned char Xthal_mmu_ring_bits = XCHAL_MMU_RING_BITS; -const unsigned char Xthal_mmu_sr_bits = XCHAL_MMU_SR_BITS; -const unsigned char Xthal_mmu_ca_bits = XCHAL_MMU_CA_BITS; -#endif -#if XCHAL_HAVE_TLBS -const unsigned int Xthal_mmu_max_pte_page_size = XCHAL_MMU_MAX_PTE_PAGE_SIZE; -const unsigned int Xthal_mmu_min_pte_page_size = XCHAL_MMU_MIN_PTE_PAGE_SIZE; -const unsigned char Xthal_itlb_way_bits = XCHAL_ITLB_WAY_BITS; -const unsigned char Xthal_itlb_ways = XCHAL_ITLB_WAYS; -const unsigned char Xthal_itlb_arf_ways = XCHAL_ITLB_ARF_WAYS; -const unsigned char Xthal_dtlb_way_bits = XCHAL_DTLB_WAY_BITS; -const unsigned char Xthal_dtlb_ways = XCHAL_DTLB_WAYS; -const unsigned char Xthal_dtlb_arf_ways = XCHAL_DTLB_ARF_WAYS; -#else -const unsigned int Xthal_mmu_max_pte_page_size = 0; -const unsigned int Xthal_mmu_min_pte_page_size = 0; -const unsigned char Xthal_itlb_way_bits = 0; -const unsigned char Xthal_itlb_ways = 0; -const unsigned char Xthal_itlb_arf_ways = 0; -const unsigned char Xthal_dtlb_way_bits = 0; -const unsigned char Xthal_dtlb_ways = 0; -const unsigned char Xthal_dtlb_arf_ways = 0; -#endif - - -/* Internal memories... 
*/ - -const unsigned char Xthal_num_instrom = XCHAL_NUM_INSTROM; -const unsigned char Xthal_num_instram = XCHAL_NUM_INSTRAM; -const unsigned char Xthal_num_datarom = XCHAL_NUM_DATAROM; -const unsigned char Xthal_num_dataram = XCHAL_NUM_DATARAM; -const unsigned char Xthal_num_xlmi = XCHAL_NUM_XLMI; - -/* Define arrays of internal memories' addresses and sizes: */ -#define MEMTRIPLET(n,mem,memcap) _MEMTRIPLET(n,mem,memcap) -#define _MEMTRIPLET(n,mem,memcap) MEMTRIPLET##n(mem,memcap) -#define MEMTRIPLET0(mem,memcap) \ - const unsigned int Xthal_##mem##_vaddr[1] = { 0 }; \ - const unsigned int Xthal_##mem##_paddr[1] = { 0 }; \ - const unsigned int Xthal_##mem##_size [1] = { 0 }; -#define MEMTRIPLET1(mem,memcap) \ - const unsigned int Xthal_##mem##_vaddr[1] = { XCHAL_##memcap##0_VADDR }; \ - const unsigned int Xthal_##mem##_paddr[1] = { XCHAL_##memcap##0_PADDR }; \ - const unsigned int Xthal_##mem##_size [1] = { XCHAL_##memcap##0_SIZE }; -#define MEMTRIPLET2(mem,memcap) \ - const unsigned int Xthal_##mem##_vaddr[2] = { XCHAL_##memcap##0_VADDR, XCHAL_##memcap##1_VADDR }; \ - const unsigned int Xthal_##mem##_paddr[2] = { XCHAL_##memcap##0_PADDR, XCHAL_##memcap##1_PADDR }; \ - const unsigned int Xthal_##mem##_size [2] = { XCHAL_##memcap##0_SIZE, XCHAL_##memcap##1_SIZE }; -MEMTRIPLET(XCHAL_NUM_INSTROM, instrom, INSTROM) -MEMTRIPLET(XCHAL_NUM_INSTRAM, instram, INSTRAM) -MEMTRIPLET(XCHAL_NUM_DATAROM, datarom, DATAROM) -MEMTRIPLET(XCHAL_NUM_DATARAM, dataram, DATARAM) -MEMTRIPLET(XCHAL_NUM_XLMI, xlmi, XLMI) - -/* Timer info... */ - -const unsigned char Xthal_have_ccount = XCHAL_HAVE_CCOUNT; -const unsigned char Xthal_num_ccompare = XCHAL_NUM_TIMERS; - -#ifdef INCLUDE_DEPRECATED_HAL_CODE -const unsigned char Xthal_have_old_exc_arch = XCHAL_HAVE_XEA1; -const unsigned char Xthal_have_mmu = XCHAL_HAVE_TLBS; -const unsigned int Xthal_num_regs = XCHAL_NUM_AREGS; /*DEPRECATED*/ -const unsigned char Xthal_num_irom = XCHAL_NUM_INSTROM; /*DEPRECATED*/ -const unsigned char Xthal_num_iram = XCHAL_NUM_INSTRAM; /*DEPRECATED*/ -const unsigned char Xthal_num_drom = XCHAL_NUM_DATAROM; /*DEPRECATED*/ -const unsigned char Xthal_num_dram = XCHAL_NUM_DATARAM; /*DEPRECATED*/ -const unsigned int Xthal_configid0 = XCHAL_HW_CONFIGID0; -const unsigned int Xthal_configid1 = XCHAL_HW_CONFIGID1; -#endif - diff --git a/src/arch/xtensa/hal/miscellaneous.S b/src/arch/xtensa/hal/miscellaneous.S deleted file mode 100644 index 1a3ebf49b001..000000000000 --- a/src/arch/xtensa/hal/miscellaneous.S +++ /dev/null @@ -1,56 +0,0 @@ -// -// miscellaneous.S - miscellaneous assembly language routines -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/miscellaneous.S#1 $ - -// Copyright (c) 2011 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include - - - .text - -//---------------------------------------------------------------------- -// Clear any remnant register state pointing to (or containing) code. -// Specifically, clear loop registers (LCOUNT) to avoid hardware loopback -// from LEND to LBEGIN when new code is loaded where code containing a -// zero-overhead loop was located. See the HAL chapter of the Tensilica -// System Software Reference Manual for details on the use of this function. -//---------------------------------------------------------------------- - -#if defined(__SPLIT__clear_regcached_code) || \ - defined(__SPLIT__clear_regcached_code_nw) - -DECLFUNC(xthal_clear_regcached_code) - abi_entry -# if XCHAL_HAVE_LOOPS - movi a2, 0 - wsr.lcount a2 -# endif - isync_retw_nop - abi_return - endfunc - -#endif - diff --git a/src/arch/xtensa/hal/mmu.c b/src/arch/xtensa/hal/mmu.c deleted file mode 100644 index ef4165206795..000000000000 --- a/src/arch/xtensa/hal/mmu.c +++ /dev/null @@ -1,93 +0,0 @@ -// -// mmu.c - MMU related functions -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/mmu.c#1 $ - -// Copyright (c) 2002, 2008 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -/* - * Convert a virtual address to a physical address - * (through static maps only). - * Returns 0 if successful (*paddrp is set), -1 if not (no mapping). 
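 *
 * A purely illustrative call (buf and start_dma_to() are hypothetical,
 * not part of the HAL):
 *
 *     unsigned paddr;
 *     if (xthal_static_v2p((unsigned)buf, &paddr) == 0)
 *         start_dma_to(paddr);      // hand the physical address to hardware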
- */ -int xthal_static_v2p( unsigned vaddr, unsigned *paddrp /*, unsigned len, unsigned rasid*/ ) -{ -#if XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY - if( vaddr >= XCHAL_KSEG_CACHED_VADDR - && vaddr <= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE ) - vaddr += XCHAL_KSEG_CACHED_PADDR - XCHAL_KSEG_CACHED_VADDR; - else if( vaddr >= XCHAL_KSEG_BYPASS_VADDR - && vaddr <= XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE ) - vaddr += XCHAL_KSEG_BYPASS_PADDR - XCHAL_KSEG_BYPASS_VADDR; - else if( vaddr >= XCHAL_KIO_CACHED_VADDR - && vaddr <= XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE ) - vaddr += XCHAL_KIO_CACHED_PADDR - XCHAL_KIO_CACHED_VADDR; - else if( vaddr >= XCHAL_KIO_BYPASS_VADDR - && vaddr <= XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE ) - vaddr += XCHAL_KIO_BYPASS_PADDR - XCHAL_KIO_BYPASS_VADDR; - else - return( -1 ); /* no known mapping */ -#endif /* XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY */ - *paddrp = vaddr; /* virtual == physical */ - return( 0 ); -} - -/* - * Convert a physical address to a virtual address - * (through static maps only). - * Returns 0 if successful (*vaddrp is set), -1 if not (no mapping). - * - * NOTE: A physical address can be mapped from multiple virtual addresses - * (or one or none). - * There should be better parameter(s) to help select the mapping returned - * (eg. cache mode, address, asid, etc), or somehow return them all. - * Mappings returned currently assume the current RASID setting. - */ -int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, /*unsigned len, unsigned rasid,*/ unsigned cached ) -{ -#if XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY - if( cached ) { - if( paddr >= XCHAL_KSEG_CACHED_PADDR - && paddr <= XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE ) - paddr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_CACHED_PADDR; - else if( paddr >= XCHAL_KIO_BYPASS_PADDR - && paddr <= XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE ) - paddr += XCHAL_KIO_BYPASS_VADDR - XCHAL_KIO_BYPASS_PADDR; - else - return -1; /* no known mapping */ - } else { - if( paddr >= XCHAL_KSEG_BYPASS_PADDR - && paddr <= XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE ) - paddr += XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_BYPASS_PADDR; - else if( paddr >= XCHAL_KIO_CACHED_PADDR - && paddr <= XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE ) - paddr += XCHAL_KIO_CACHED_VADDR - XCHAL_KIO_CACHED_PADDR; - else - return -1; /* no known mapping */ - } -#endif /* XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY */ - *vaddrp = paddr; /* virtual == physical */ - return( 0 ); -} - diff --git a/src/arch/xtensa/hal/mp_asm.S b/src/arch/xtensa/hal/mp_asm.S deleted file mode 100644 index cc203436ad67..000000000000 --- a/src/arch/xtensa/hal/mp_asm.S +++ /dev/null @@ -1,123 +0,0 @@ -// -// mp_asm.S - multi-processor synchronization routines -// -// $Id$ - -// Copyright (c) 2003, 2005, 2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - -/* - int xthal_compare_and_set( int *address, int test_value, int set_value ) - - Atomically sets *address to set_value if *address equals test_value. - Returns the previous value of *address (the one compared with test_value). - - Uses the S32C1I instruction if available. - S32C1I requires special support from the memory controller for - memory accessed via the PIF interface. For this and other reasons, - S32C1I might not work on the entire 4GB address range. This function - does not test address validity. That is the responsibility of the - software invoking this function. -*/ - .text - .align 4 - .global xthal_compare_and_set - .type xthal_compare_and_set,@function - -xthal_compare_and_set: - abi_entry - // a2 == address - // a3 == test value - // a4 == set value - -#if XCHAL_HAVE_EXCLUSIVE - mov a6, a4 // a6 = copy of set_value -1: - l32ex a5, a2 // a5 = *address, set monitor - bne a5, a3, 2f // skip write if *address != test_value - mov a4, a6 // a4 = set_value - s32ex a4, a2 // *address = set_value - getex a4 // get result of store - beqz a4, 1b -2: - mov a2, a5 // a2 = *address, return value - clrex // in case we skipped write -#elif XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION_MAJOR >= 2200 - mov a6, a4 // a6 = copy of set_value - movi a5, -1 - xor a5, a5, a3 // a5 = ~a3 - wsr.scompare1 a3 // set test_value -1: - mov a4, a6 // a4 = set_value - s32c1i a4, a2, 0 - bne a4, a5, 2f // if a4 != ~SCOMPARE1 then done - l32i a4, a2, 0 // a4 = *address - bne a4, a5, 1b // retry if *address != ~SCOMPARE1 -2: - mov a2, a4 -#else - mov a7, a2 // a7 == address, a2 is return val -# if XCHAL_HAVE_INTERRUPTS - rsil a5, 15 // a5 == new ps -# endif - l32i a2, a7, 0 // a2 == value to test, return val - bne a3, a2, done // test - - s32i a4, a7, 0 // write the new value - -done: -# if XCHAL_HAVE_INTERRUPTS - wsr.ps a5 // restore the PS - rsync -# endif -#endif - abi_return - - .size xthal_compare_and_set, . - xthal_compare_and_set - - -/* - unsigned xthal_get_prid( void ); - - Returns the value of the PRID register (processor ID), - or 0 if not configured. - (Note: this register, when present, cannot / must-not - change value during runtime; on certain processors, - its value may get sampled only at reset. - It can never be written to, hence - there is no xthal_set_prid() function.) -*/ - .align 4 - .global xthal_get_prid - .type xthal_get_prid,@function -xthal_get_prid: - abi_entry -#if XCHAL_HAVE_PRID - rsr.prid a2 -#else - movi a2, 0 -#endif - abi_return - .size xthal_get_prid, . - xthal_get_prid - diff --git a/src/arch/xtensa/hal/mpu.c b/src/arch/xtensa/hal/mpu.c deleted file mode 100644 index bacbfc6ff0c7..000000000000 --- a/src/arch/xtensa/hal/mpu.c +++ /dev/null @@ -1,1830 +0,0 @@ -/* - * Copyright (c) 2004-2015 Cadence Design Systems Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#include - -#if XCHAL_HAVE_MPU -#include -#include -#include -#include - -/* - * General notes: - * Wherever an address is represented as an unsigned, it has only the 27 most significant bits. This is how - * the addresses are represented in the MPU. It has the benefit that we don't need to worry about overflow. - * - * The asserts in the code are ignored unless an assert handler is set (as it is during testing). - * - * If an assert handler is set, then the MPU map is checked for correctness after every update. - * - * On some configs (actually all configs right now), the MPU entries must be aligned to the background map. - * The constant: XCHAL_MPU_ALIGN_REQ indicates if alignment is required: - * - * The rules for a valid map are: - * - * 1) The entries' vStartAddress fields must always be in non-descending order. - * 2) The entries' memoryType and accessRights must contain valid values - * - * If XCHAL_MPU_ALIGN_REQ == 1 then the following additional rules are enforced: - * 3) If entry0's Virtual Address Start field is nonzero, then that field must equal one of the - * Background Map's Virtual Address Start field values if software ever intends to assert entry0's MPUENB bit. - * 4) If entryN's MPUENB bit will ever be negated while at the same time entryN+1's MPUENB bit is asserted, - * then entryN+1's Virtual Address Start field must equal one of the Background Map's Virtual Address Start field values. - * - * The internal function are first, and the external 'xthal_' functions are at the end. - * - */ -extern void (*_xthal_assert_handler)(); -extern void xthal_write_map_raw(const xthal_MPU_entry* fg, unsigned int n); -extern void xthal_read_map_raw(const xthal_MPU_entry* fg); -extern xthal_MPU_entry _xthal_get_entry(const xthal_MPU_entry* fg, const xthal_MPU_entry* bg, - unsigned int addr, int* infgmap); - -#define MPU_ADDRESS_MASK (0xffffffff << XCHAL_MPU_ALIGN_BITS) -#define MPU_ALIGNMENT_MASK (0xffffffff - MPU_ADDRESS_MASK) - -#define MPU_VSTART_CORRECTNESS_MASK ((0x1 << (XCHAL_MPU_ALIGN_BITS)) - 1) -// Set this to 1 for more extensive internal checking / 0 for production -#define MPU_DEVELOPMENT_MODE 0 - -#if XCHAL_MPU_ALIGN_REQ -#define XCHAL_MPU_WORST_CASE_ENTRIES_FOR_REGION 3 -#else -#define XCHAL_MPU_WORST_CASE_ENTRIES_FOR_REGION 2 -#endif - -/* - * At some point it is faster to commit/invalidate the entire cache rather than going on line at a time. 
- * If a region is bigger than 'CACHE_REGION_THRESHOLD' we operate on the entire cache. - */ -#if XCHAL_DCACHE_LINESIZE -#define CACHE_REGION_THRESHOLD (32 * XCHAL_DCACHE_LINESIZE / XCHAL_MPU_ALIGN) -#else -#define CACHE_REGION_THRESHOLD 0 -#endif - - -/* - * Normally these functions are no-ops, but the MPU test harness sets an assert handler to detect any inconsistencies in MPU - * entries or any other unexpected internal condition. - */ -#if MPU_DEVELOPMENT_MODE -static void my_assert(int arg) -{ - if (_xthal_assert_handler && !arg) - _xthal_assert_handler(); -} - -static void assert_map_valid() -{ - - if (_xthal_assert_handler) - { - xthal_MPU_entry fg[XCHAL_MPU_ENTRIES]; - xthal_read_map(fg); - if (xthal_check_map(fg, XCHAL_MPU_ENTRIES)) - _xthal_assert_handler(); - } -} - -static void assert_attributes_equivalent(unsigned addr, const xthal_MPU_entry* initial, - const xthal_MPU_entry* fg, const xthal_MPU_entry* bg) -{ - - xthal_MPU_entry e1 = _xthal_get_entry(initial, bg, addr, 0); - xthal_MPU_entry e2 = _xthal_get_entry(fg, bg, addr, 0); - my_assert((XTHAL_MPU_ENTRY_GET_ACCESS(e1) == XTHAL_MPU_ENTRY_GET_ACCESS(e2)) && (XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(e1) == XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(e2))); -} - -static void assert_maps_equivalent(const xthal_MPU_entry* initial, const xthal_MPU_entry* fg, - const xthal_MPU_entry* bg) -{ - /* this function checks that for every address the MPU entries 'initial' result in the same attributes as the entries in 'fg'. - * We only need to check at the addresses that appear in 'initial', 'fg', or 'bg'. - */ - int i; - for (i = 0; i < XCHAL_MPU_ENTRIES; i++) - { - assert_attributes_equivalent(XTHAL_MPU_ENTRY_GET_VSTARTADDR(initial[i]), initial, fg, bg); - assert_attributes_equivalent(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]), initial, fg, bg); - } - for (i = 0; i < XCHAL_MPU_BACKGROUND_ENTRIES; i++) - assert_attributes_equivalent(XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]), initial, fg, bg); -} -#else -#define my_assert(x) -#define assert_map_valid(x) -#endif - -#if 0 -// These functions aren't used, but am leaving the definitions in place -// for possible future use. 
-static inline unsigned read_mpucfg() -{ - unsigned long tmp; - __asm__ __volatile__("rsr.mpucfg %0\n\t" - : "=a" (tmp)); - return tmp; -} - -static inline unsigned read_mpuenb() -{ - unsigned long tmp; - __asm__ __volatile__("rsr.mpuenb %0\n\t" - : "=a" (tmp)); - return tmp; -} - -/* This function writes the enable for the MPU entries */ -static inline void write_mpuenb(unsigned v) -{ - __asm__ __volatile__("wsr.mpuenb %0\n\t" - : : "a" (v)); -} - -#endif - -static inline void isync() -{ - __asm__ __volatile__("isync\n\t"); -} - -/* This function writes the cache disable register which - * disables the cache by 512MB registers to save power*/ -static inline void write_cacheadrdis(unsigned v) -{ - __asm__ __volatile__("wsr.cacheadrdis %0\n\t" - : : "a" (v)); -} - -inline static int is_cacheable(unsigned int mt); - -#if 0 -static inline void read_map_entry(unsigned en_num, xthal_MPU_entry* en) -{ - unsigned as; - unsigned at0; - unsigned at1; - as = en_num; - __asm__ __volatile__("RPTLB0 %0, %1\n\t" : "+a" (at0) : "a" (as)); - __asm__ __volatile__("RPTLB1 %0, %1\n\t" : "+a" (at1) : "a" (as)); - en->as = at0; - en->at = at1; -} -#endif - -inline static int is_cacheable(unsigned int mt) -{ - return (0x180 & mt) || ((mt & 0x18) == 0x10) || ((mt & 0x30) == 0x30); -} - -inline static int is_writeback(unsigned int mt) -{ - return (((0x180 & mt) && (mt & 0x11)) || - ((((mt & 0x18) == 0x10) || ((mt & 0x30) == 0x30)) & 0x1)); -} - -inline static int is_device(unsigned int mt) -{ - return ((mt & 0x1f0) == 0); -} - -inline static int is_kernel_readable(int accessRights) -{ - switch (accessRights) - { - case XTHAL_AR_R: - case XTHAL_AR_Rr: - case XTHAL_AR_RX: - case XTHAL_AR_RXrx: - case XTHAL_AR_RW: - case XTHAL_AR_RWX: - case XTHAL_AR_RWr: - case XTHAL_AR_RWrw: - case XTHAL_AR_RWrwx: - case XTHAL_AR_RWXrx: - case XTHAL_AR_RWXrwx: - return 1; - case XTHAL_AR_NONE: - case XTHAL_AR_Ww: - return 0; - default: - return XTHAL_BAD_ACCESS_RIGHTS; - } -} - -inline static int is_kernel_writeable(int accessRights) -{ - switch (accessRights) - { - case XTHAL_AR_RW: - case XTHAL_AR_RWX: - case XTHAL_AR_RWr: - case XTHAL_AR_RWrw: - case XTHAL_AR_RWrwx: - case XTHAL_AR_RWXrx: - case XTHAL_AR_RWXrwx: - case XTHAL_AR_Ww: - return 1; - case XTHAL_AR_NONE: - case XTHAL_AR_R: - case XTHAL_AR_Rr: - case XTHAL_AR_RX: - case XTHAL_AR_RXrx: - return 0; - default: - return XTHAL_BAD_ACCESS_RIGHTS; - } -} - -inline static int is_kernel_executable(int accessRights) -{ - switch (accessRights) - { - case XTHAL_AR_RX: - case XTHAL_AR_RXrx: - case XTHAL_AR_RWX: - case XTHAL_AR_RWXrx: - case XTHAL_AR_RWXrwx: - return 1; - case XTHAL_AR_NONE: - case XTHAL_AR_Ww: - case XTHAL_AR_R: - case XTHAL_AR_Rr: - case XTHAL_AR_RW: - case XTHAL_AR_RWr: - case XTHAL_AR_RWrw: - case XTHAL_AR_RWrwx: - return 0; - default: - return XTHAL_BAD_ACCESS_RIGHTS; - } -} - -inline static int is_user_readable(int accessRights) -{ - switch (accessRights) - { - case XTHAL_AR_Rr: - case XTHAL_AR_RXrx: - case XTHAL_AR_RWr: - case XTHAL_AR_RWrw: - case XTHAL_AR_RWrwx: - case XTHAL_AR_RWXrx: - case XTHAL_AR_RWXrwx: - return 1; - case XTHAL_AR_R: - case XTHAL_AR_RX: - case XTHAL_AR_RW: - case XTHAL_AR_RWX: - case XTHAL_AR_NONE: - case XTHAL_AR_Ww: - return 0; - default: - return XTHAL_BAD_ACCESS_RIGHTS; - } -} - -inline static int is_user_writeable(int accessRights) -{ - switch (accessRights) - { - case XTHAL_AR_Ww: - case XTHAL_AR_RWrw: - case XTHAL_AR_RWrwx: - case XTHAL_AR_RWXrwx: - return 1; - case XTHAL_AR_NONE: - case XTHAL_AR_R: - case XTHAL_AR_Rr: - case 
XTHAL_AR_RX: - case XTHAL_AR_RXrx: - case XTHAL_AR_RW: - case XTHAL_AR_RWX: - case XTHAL_AR_RWr: - case XTHAL_AR_RWXrx: - return 0; - default: - return XTHAL_BAD_ACCESS_RIGHTS; - } -} - -inline static int is_user_executable(int accessRights) -{ - switch (accessRights) - { - case XTHAL_AR_RXrx: - case XTHAL_AR_RWrwx: - case XTHAL_AR_RWXrx: - case XTHAL_AR_RWXrwx: - return 1; - case XTHAL_AR_RW: - case XTHAL_AR_RWX: - case XTHAL_AR_RWr: - case XTHAL_AR_RWrw: - case XTHAL_AR_R: - case XTHAL_AR_Rr: - case XTHAL_AR_RX: - case XTHAL_AR_NONE: - case XTHAL_AR_Ww: - return 0; - default: - return XTHAL_BAD_ACCESS_RIGHTS; - } -} - -/* This function returns the map entry that is used for the address 'addr' (27msb). - * - */ -#if defined(__SPLIT__mpu_basic) - -xthal_MPU_entry _xthal_get_entry(const xthal_MPU_entry* fg, const xthal_MPU_entry* bg, - unsigned int addr, int* infgmap) -{ - int i; - for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) <= addr) - { - if (XTHAL_MPU_ENTRY_GET_VALID(fg[i])) - { - if (infgmap) - *infgmap = 1; - return fg[i]; - } - else - break; - } - } - for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]) <= addr) - { - if (infgmap) - *infgmap = 0; - return bg[i]; - } - } - return bg[0]; // never reached ... just to get rid of compilation warning -} - -/* returns true if the supplied address (27msb) is in the background map. */ -int _xthal_in_bgmap(unsigned int address, const xthal_MPU_entry* bg) -{ - int i; - for (i = 0; i < XCHAL_MPU_BACKGROUND_ENTRIES; i++) - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]) == address) - return 1; - return 0; -} - -#endif - -#if defined(__SPLIT__mpu_attributes) - -/* This function updates the map entry as well as internal duplicate of the map - * state in fg. The assumption is that reading map entries could be somewhat - * expensive in some situations so we are keeping a copy of the map in memory when - * doing extensive map manipulations. - */ -static void write_map_entry(xthal_MPU_entry* fg, unsigned en_num, xthal_MPU_entry en) -{ - en.at = (en.at & 0xffffffe0) | (en_num & 0x1f); - xthal_mpu_set_entry(en); - assert_map_valid(); - fg[en_num] = en; -} - -static void move_map_down(xthal_MPU_entry* fg, int dup, int idx) -{ - /* moves the map entry list down one (leaving duplicate entries at idx and idx+1. This function assumes that the last - * entry is invalid ... call MUST check this - */ - unsigned int i; - for (i = dup; i > idx; i--) - { - write_map_entry(fg, i, fg[i - 1]); - } -} - -static void move_map_up(xthal_MPU_entry* fg, int dup, int idx) -{ - /* moves the map entry list up one (leaving duplicate entries at idx and idx-1, removing the entry at dup - */ - int i; - for (i = dup; i < idx - 1; i++) - { - write_map_entry(fg, i, fg[i + 1]); - } -} - -static int bubble_free_to_ip(xthal_MPU_entry* fg, int ip, int required) -{ - /* This function shuffles the entries in the MPU to get at least 'required' free entries at - * the insertion point 'ip'. This function returns the new insertion point (after all the shuffling). - */ - int i; - int rv = ip; - if (required < 1) - return ip; - my_assert(required <= XCHAL_MPU_ENTRIES); - /* first we search for duplicate or unused entries at an index less than 'ip'. We start looking at ip-1 - * (rather than 0) to minimize the number of shuffles required. 
- */ - for (i = ip - 2; i >= 0 && required;) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i + 1])) - { - move_map_up(fg, i, ip); - rv--; - required--; - } - i--; - } - // if there are any invalid entries at top of the map, we can remove them to make space - while (required) - { - if (!XTHAL_MPU_ENTRY_GET_VALID(fg[0])) - { - move_map_up(fg, 0, ip); - rv--; - required--; - } - else - break; - } - /* If there are not enough unneeded entries at indexes less than ip, then we search at indexes > ip. - * We start the search at ip+1 and move down, again to minimize the number of shuffles required. - */ - - for (i = ip + 1; i < XCHAL_MPU_ENTRIES && required;) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1])) - { - move_map_down(fg, i, ip); - required--; - } - else - i++; - } - my_assert(required == 0); - return rv; -} - - -/* This function removes 'inaccessible' entries from the MPU map (those that are hidden by previous entries - * in the map). It leaves any entries that match background entries in place. - */ -static void remove_inaccessible_entries(xthal_MPU_entry* fg, const xthal_MPU_entry* bg) -{ - int i; - for (i = 1; i < XCHAL_MPU_ENTRIES; i++) - { - if (((XTHAL_MPU_ENTRY_GET_VALID(fg[i]) == XTHAL_MPU_ENTRY_GET_VALID(fg[i - 1])) && (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) > XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1])) - && (XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[i]) == XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[i - 1])) && (XTHAL_MPU_ENTRY_GET_ACCESS(fg[i]) == XTHAL_MPU_ENTRY_GET_ACCESS(fg[i - 1])) && - /* we can only remove the background map entry if either background alignment is not required, or - * if the previous entry is enabled. - */ - (!_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]), bg))) - || ((!XTHAL_MPU_ENTRY_GET_VALID(fg[i]) && (!XTHAL_MPU_ENTRY_GET_VALID(fg[i - 1])) && (!_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]), bg))))) - { - write_map_entry(fg, i, fg[i - 1]); - } - } -} - -/* This function takes bitwise or'd combination of access rights and memory type, and extracts - * the access rights. It returns the access rights, or -1. - */ -static int encode_access_rights(int cattr) -{ - cattr = cattr & 0xF; - if ((cattr) > 0 && (cattr < 4)) - return -1; - else - return cattr; -} - -/* - * returns the largest value rv, such that for every index < rv, - * entrys[index].vStartAddress < first. - * - * Assumes an ordered entry array (even disabled entries must be ordered). - * value returned is in the range [0, XCHAL_MPU_ENTRIES]. - * - */ -static int find_entry(xthal_MPU_entry* fg, unsigned first) -{ - int i; - for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) <= first) - return i + 1; - } - return 0; // if it is less than all existing entries return 0 -} - -/* - * This function returns 1 if there is an exact match for first and first+size - * so that no manipulations are necessary before safing and updating the attributes - * for [first, first+size). The the first and end entries - * must be valid, as well as all the entries in between. Otherwise the memory - * type might change across the region and we wouldn't be able to safe the caches. - * - * An alternative would be to require alignment regions in this case, but that seems - * more wasteful. 
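/* A condensed sketch (not a HAL entry point; 'fg' is assumed to be a snapshot
 * obtained via xthal_read_map()) of the lookup rule implemented by
 * _xthal_get_entry() above and relied on throughout this file: the highest
 * foreground entry whose vStartAddress is at or below the address wins if it
 * is valid, otherwise the background map applies.
 */
static int covering_fg_index(const xthal_MPU_entry *fg, unsigned addr)
{
    int i;
    for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--)
        if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) <= addr)
            return XTHAL_MPU_ENTRY_GET_VALID(fg[i]) ? i : -1;
    return -1; /* no foreground match: attributes come from Xthal_mpu_bgmap */
}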
- */ -static int needed_entries_exist(xthal_MPU_entry* fg, unsigned first, unsigned last) -{ - int i; - for (i = 0; i < XCHAL_MPU_ENTRIES; i++) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == first) - { - int j; - /* special case ... is last at the end of the address space - * ... if so there is no end entry needed. - */ - if (last == 0xFFFFFFFF) - { - int k; - for (k = i; k < XCHAL_MPU_ENTRIES; k++) - if (!XTHAL_MPU_ENTRY_GET_VALID(fg[k])) - return 0; - return 1; - } - /* otherwise search for the end entry */ - for (j = i; j < XCHAL_MPU_ENTRIES; j++) - if (last == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[j])) - { - int k; - for (k = i; k <= j; k++) - if (!XTHAL_MPU_ENTRY_GET_VALID(fg[k])) - return 0; - return 1; - } - return 0; - } - } - return 0; -} - -/* This function computes the number of MPU entries that are available for use in creating a new - * region. - */ -static int number_available(xthal_MPU_entry* fg) -{ - int i; - int rv = 0; - int valid_seen = 0; - for (i = 0; i < XCHAL_MPU_ENTRIES; i++) - { - if (!valid_seen) - { - if (XTHAL_MPU_ENTRY_GET_VALID(fg[i])) - valid_seen = 1; - else - { - rv++; - continue; - } - } - else - { - if (i && (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1]))) - rv++; - } - } - return rv; -} - -/* - * This function returns index of the background map entry that maps the address 'first' if there are no - * enabled/applicable foreground map entries. - */ -static int get_bg_map_index(const xthal_MPU_entry* bg, unsigned first) -{ - int i; - for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--) - if (first > XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i])) - return i; - return 0; -} - -inline static unsigned int covert_to_writethru_memtype(unsigned int wb_memtype) -{ - unsigned int prefix = wb_memtype & 0x1f0; - if (prefix == 0x10) - return wb_memtype & 0xfffffffe; - else - return wb_memtype & 0xffffffee; -} - -/* - * This function takes the region pointed to by ip, and makes it safe from the aspect of cache coherency, before - * changing the memory type and possibly corrupting the cache. If wb is 0, then that indicates - * that we should ignore uncommitted entries. If the inv argument is 0 that indicates that we shouldn't invalidate - * the cache before switching to bypass. - */ -static void safe_region(xthal_MPU_entry* fg, int ip, unsigned end_of_segment, int memoryType, int wb, int inv, - unsigned int* post_inv_all) -{ - unsigned length = end_of_segment - XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip]); // initially keep length 27msb to avoid possibility of overflow - if (!length) - return; // if the region is empty, there is no need to safe it - - int cmemType = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[ip]); - - if (memoryType == cmemType) - return; // not changing memory types ... we don't need to do anything - - int mt_is_wb = is_writeback(memoryType); - int mt_is_ch = is_cacheable(memoryType); - - // nothing needs to be done in these cases - if (mt_is_wb || (!wb && (!inv || mt_is_ch))) - return; - - int need_flush = wb && (is_writeback(cmemType) && !is_writeback(memoryType)); - int need_invalidate = inv && (is_cacheable(cmemType) && !is_cacheable(memoryType)); - - void* addr = (void*) XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip]); - - int write_by_region = length < CACHE_REGION_THRESHOLD; - - if (need_flush) - { - XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(fg[ip], covert_to_writethru_memtype(XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[ip]))); - // If the AR == NONE, the writing back the cache may generate exception. Temporarily open up the protections ... - // ... 
- if (XTHAL_MPU_ENTRY_GET_ACCESS(fg[ip]) == XTHAL_AR_NONE) - XTHAL_MPU_ENTRY_SET_ACCESS(fg[ip], XTHAL_AR_RWXrwx); - // bit 0 determines if it wb/wt - write_map_entry(fg, ip, fg[ip]); - if (!write_by_region) - { - /* unfortunately there is no straight forward way to avoid the possibility of doing - * multiple xthal_dcache_all_writeback() calls during a region update. The reason for this - * is that: - * - * 1) The writeback must be done before the memory type is changed to non-cacheable before - * an invalidate (see below) - * - * 2) it isn't possible to reorganize the loop so that all the writebacks are done before - * any of the invalidates because if part of the region of interest is (initially) mapped - * by the background map, then a single foreground entry is reused to 'safe' across - * each background map entry that is overlapped. - */ - xthal_dcache_all_writeback(); - } - else if (length) - xthal_dcache_region_writeback(addr, length); - } - - if (need_invalidate) - { - XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(fg[ip], - XTHAL_ENCODE_MEMORY_TYPE(XCHAL_CA_BYPASS)); - write_map_entry(fg, ip, fg[ip]); - /* only need to call all_invalidate once ... check - * if it has already been done. - */ - if (!*post_inv_all) - { - if (!write_by_region) - { - *post_inv_all = 1; - } - else if (length) - { - xthal_icache_region_invalidate(addr, length); - xthal_dcache_region_writeback_inv(addr, length); - } - } - } -} - -static unsigned max(unsigned a, unsigned b, unsigned c) -{ - if (a > b && a > c) - return a; - else if (b > c) - return b; - else - return c; -} - -/* This function returns the next address to commit which will be the greatest of the following: - * 1) The start of the region we are creating - * 2) The vStartAddress of the previous entry - * 3) The background map entry that precedes the current address (last address committed). - */ -static unsigned next_address_to_commit(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first, - int current_index) -{ - unsigned current = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[current_index]); - return max(first, XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[current_index - 1]), XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[get_bg_map_index(bg, current)])); -} - -/* - * This function does a series of calls to safe_region() to ensure that no data will be corrupted when changing the memory type - * of an MPU entry. These calls are made for every entry address in the range[first,end), as well as at any background region boundary - * in the range[first,end). In general it is necessary to safe at the background region boundaries, because the memory type could - * change at that address. - * - * This function is written to reuse already needed entries for the background map 'safes' which complicates things somewhat. - * - * After the calls to safe region are complete, then the entry attributes are updated for every entry in the range [first,end). 
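/* A condensed sketch (illustrative only; 'addr' and 'len' are hypothetical) of
 * the ordering safe_region() above enforces when a region leaves writeback
 * mode.  The entry rewrites are done with write_map_entry() in the real code
 * and are shown here only as comments; for regions of at least
 * CACHE_REGION_THRESHOLD bytes the code uses the whole-cache variants
 * (xthal_dcache_all_writeback(), xthal_dcache_all_writeback_inv(),
 * xthal_icache_all_invalidate()) instead of the region calls below.
 */
static void safe_downgrade_to_bypass(void *addr, unsigned len)
{
    /* 1. retype the entry as writethrough so no new dirty lines appear */
    /* 2. flush data that is already dirty */
    xthal_dcache_region_writeback(addr, len);
    /* 3. retype the entry as bypass (non-cacheable) */
    /* 4. drop stale lines so a later cacheable mapping starts clean */
    xthal_icache_region_invalidate(addr, len);
    xthal_dcache_region_writeback_inv(addr, len);
}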
- */ -static void safe_and_commit_overlaped_regions(xthal_MPU_entry* fg, const xthal_MPU_entry*bg, unsigned first, - unsigned last, int memoryType, int accessRights, int wb, int inv) -{ - int i; - unsigned int next; - unsigned end_of_segment = last; - unsigned post_inv_all = 0; - unsigned int cachedisadr; - write_cacheadrdis(0); - for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--) - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < last) - { - // first we want to commit the first entry - safe_region(fg, i, end_of_segment, memoryType, wb, inv, &post_inv_all); - end_of_segment = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]); - do - { - next = next_address_to_commit(fg, bg, first, i); - if (next == XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i - 1])) - i--; - XTHAL_MPU_ENTRY_SET_VSTARTADDR(fg[i], next); - safe_region(fg, i, last, memoryType, wb, inv, &post_inv_all); - end_of_segment = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]); - } while (next > first); - if (post_inv_all) - { - xthal_icache_all_invalidate(); - xthal_dcache_all_writeback_inv(); - } - for (; i < XCHAL_MPU_ENTRIES && XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < last; i++) - { - XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(fg[i], memoryType); - XTHAL_MPU_ENTRY_SET_ACCESS(fg[i], accessRights); - XTHAL_MPU_ENTRY_SET_VALID(fg[i], 1); - write_map_entry(fg, i, fg[i]); - } - break; - } - cachedisadr = xthal_calc_cacheadrdis(fg, XCHAL_MPU_ENTRIES); - write_cacheadrdis(cachedisadr); -} - -static void handle_invalid_pred(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first, int ip) -{ - /* Handle the case where there is an invalid entry immediately preceding the entry we - * are creating. If the entries addresses correspond to the same bg map, then we - * make the previous entry valid with same attributes as the background map entry. - * - * The case where an invalid entry exists immediately preceding whose address corresponds to a different - * background map entry is handled by create_aligning_entries_if_required(), so nothing is done here. - */ - /* todo ... optimization opportunity, the following block loops through the background map up to 4 times, - * - */ - if (!ip || XTHAL_MPU_ENTRY_GET_VALID(fg[ip - 1])) - return; - { - int i; - unsigned fgipm1_addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip - 1]); - int first_in_bg_map = 0; - int first_bg_map_index = -1; - int fgipm1_bg_map_index = -1; -#if MPU_DEVELOPMENT_MODE - unsigned fgip_addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip]); - int fgip_bg_map_index = -1; -#endif - for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--) - { - unsigned addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]); - if (addr == first) - first_in_bg_map = 1; - if (addr < fgipm1_addr && fgipm1_bg_map_index == -1) - fgipm1_bg_map_index = i; -#if MPU_DEVELOPMENT_MODE - if (addr < fgip_addr && fgip_bg_map_index == -1) - fgip_bg_map_index = i; -#endif - if (addr < first && first_bg_map_index == -1) - first_bg_map_index = i; - } - if (!first_in_bg_map && (first_bg_map_index == fgipm1_bg_map_index)) - { - // There should be a subsequent entry that falls in the address range of same - // background map entry ... 
if not, we have a problem because the following - // will corrupt the memory map -#if MPU_DEVELOPMENT_MODE - { - my_assert(fgip_bg_map_index == fgipm1_bg_map_index); - } -#endif - xthal_MPU_entry temp = _xthal_get_entry(fg, bg, XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip - 1]), 0); - XTHAL_MPU_ENTRY_SET_VSTARTADDR(temp, XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[ip - 1])); - write_map_entry(fg, ip - 1, temp); - } - } -} - -/* This function inserts a entry (unless it already exists) with vStartAddress of first. The new entry has - * the same accessRights and memoryType as the address first had before the call. - * - * If 'invalid' is specified, then insert an invalid region if no foreground entry exists for the address 'first'. - */ -static int insert_entry_if_needed_with_existing_attr(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, - unsigned first, int invalid) -{ - int i; - int ip; - int infg; - int found = 0; - - for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--) - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) == first) - { - if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]) || invalid) - return XTHAL_SUCCESS; - else - { - found = 1; - ip = i; - break; - } - } - - if (!found) - { - if (!number_available(fg)) - return XTHAL_OUT_OF_ENTRIES; - - ip = find_entry(fg, first); - ip = bubble_free_to_ip(fg, ip, 1); - } - if (!invalid) - handle_invalid_pred(fg, bg, first, ip); - xthal_MPU_entry n; - memset(&n, 0, sizeof(n)); - n = _xthal_get_entry(fg, bg, first, &infg); - - if (invalid && !infg) // If the entry mapping is currently in the foreground we can't make - // the entry invalid without corrupting the attributes of the following entry. - XTHAL_MPU_ENTRY_SET_VALID(n, 0); - XTHAL_MPU_ENTRY_SET_VSTARTADDR(n,first); - write_map_entry(fg, ip, n); - return XTHAL_SUCCESS; -} - -static unsigned int smallest_entry_greater_than_equal(xthal_MPU_entry* fg, unsigned x) -{ - int i; - for (i = 0; i < XCHAL_MPU_ENTRIES; i++) - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) >= x) - return XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]); - return 0; -} - -/* This function creates background map aligning entries if required.*/ -static unsigned int create_aligning_entries_if_required(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, - unsigned x) -{ -#if XCHAL_MPU_ALIGN_REQ - int i; - int rv; - unsigned next_entry_address = 0; - unsigned next_entry_valid = 0; - int preceding_bg_entry_index_x = get_bg_map_index(bg, x); - unsigned preceding_bg_entry_x_addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[preceding_bg_entry_index_x]); - for (i = XCHAL_MPU_ENTRIES - 1; i >= 0; i--) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < x) - { - if (XTHAL_MPU_ENTRY_GET_VALID(fg[i])) - return XTHAL_SUCCESS; // If there is a valid entry immediately before the proposed new entry - // ... then no aligning entries are required - break; - } - else - { - next_entry_address = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]); - next_entry_valid = XTHAL_MPU_ENTRY_GET_VALID(fg[i]); - } - } - - /* - * before creating the aligning entry, we may need to create an entry or entries a higher - * addresses to limit the scope of the aligning entry. - */ - if ((!next_entry_address) || (!next_entry_valid) || (_xthal_in_bgmap(next_entry_address, bg))) - { - /* in this case, we can just create an invalid entry at the start of the new region because - * a valid entry could have an alignment problem. 
An invalid entry is safe because we know that - * the next entry is either invalid, or is on a bg map entry - */ - if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, x, 1)) != XTHAL_SUCCESS) - { - return rv; - } - } - else - { - unsigned next_bg_entry_index; - for (next_bg_entry_index = 0; next_bg_entry_index < XCHAL_MPU_BACKGROUND_ENTRIES; next_bg_entry_index++) - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[next_bg_entry_index]) > x) - break; - if (next_entry_address == XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[next_bg_entry_index])) // In this case there is no intervening bg entry - // between the new entry x, and the next existing entry so, we don't need any limiting entry - // (the existing next_entry serves as the limiting entry) - { /* intentionally empty */ - } - else - { - // In this case we need to create a valid region at the background entry that immediately precedes - // next_entry_address, and then create an invalid entry at the background entry immediately after - // x - if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, XTHAL_MPU_ENTRY_GET_VSTARTADDR(_xthal_get_entry(fg, bg, x, 0)), 0)) - != XTHAL_SUCCESS) - { - return rv; - } - if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, - XTHAL_MPU_ENTRY_GET_VSTARTADDR(_xthal_get_entry(fg, bg, XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[next_bg_entry_index]), 0)), 1)) != XTHAL_SUCCESS) - { - return rv; - } - } - } - - /* now we are finally ready to create the aligning entry.*/ - if (!(x == preceding_bg_entry_x_addr)) - if ((rv = insert_entry_if_needed_with_existing_attr(fg, bg, preceding_bg_entry_x_addr, 0)) != XTHAL_SUCCESS) - { - return rv; - } - - return XTHAL_SUCCESS; - -#else - return XTHAL_SUCCESS; -#endif -} - -static unsigned start_initial_region(xthal_MPU_entry* fg, const xthal_MPU_entry* bg, unsigned first, - unsigned end) -{ - int i; - unsigned addr; - for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i >= 0; i--) - { - addr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(bg[i]); - if (addr <= first) - break; - if (addr < end) - return addr; - } - return first; -} - -static int safe_add_region(unsigned first, unsigned last, unsigned accessRights, unsigned memoryType, - unsigned writeback, unsigned invalidate) -{ - /* This function sets the memoryType and accessRights on a region of memory. If necessary additional MPU entries - * are created so that the attributes of any memory outside the specified region are not changed. - * - * This function has 2 stages: - * 1) The map is updated one entry at a time to create (if necessary) new entries to mark the beginning and end of the - * region as well as addition alignment entries if needed. During this stage the map is always correct, and the memoryType - * and accessRights for every address remain the same. - * 2) The entries inside the update region are then safed for cache consistency (if necessary) and then written with - * the new accessRights, and memoryType. - * - * If the function fails (by running out of available map entries) during stage 1 then everything is still consistent and - * it is safe to return an error code. - * - * If XCHAL_MPU_ALIGN_REQ is provided then extra entries are create if needed - * to satisfy these alignment conditions: - * - * 1) If entry0's Virtual Address Start field is nonzero, then that field must equal one of the Background Map's - * Virtual Address Start field values if software ever intends to assert entry0's MPUENB bit. 
- * 2) If entryN's MPUENB bit will ever be negated while at the same time entryN+1's MPUENB bit is - * asserted, then entryN+1's Virtual Address Start field must equal one of the Background Map's Virtual Address Start field values. - * - * Between 0 and 2 available entries will be used by this function. In addition, if XCHAL_MPU_ALIGN_REQ == 1 up to ??? - * additional entries will be needed to meet background map alignment requirements. - * - * This function keeps a copy of the current map in 'fg'. This is kept in sync with contents of the MPU at all times. - * - */ - - int rv; - - xthal_MPU_entry fg[XCHAL_MPU_ENTRIES]; -#if MPU_DEVELOPMENT_MODE - xthal_MPU_entry on_entry[XCHAL_MPU_ENTRIES]; - xthal_read_map(on_entry); -#endif - xthal_read_map(fg); - assert_map_valid(); - - /* First we check and see if consecutive entries at first, and first + size already exist. - * in this important special case we don't need to do anything but safe and update the entries [first, first+size). - * - */ - - if (!needed_entries_exist(fg, first, last)) - { - unsigned x; - unsigned pbg; - - /* - * If we are tight on entries, the first step is to remove any redundant entries in the MPU - * to make room to ensure that there is room for the new entries we need. - * - * We need to call it here ... once we have started transforming the map it is too late - * (the process involves creating inaccessible entries that could potentially get removed). - */ - if (number_available(fg) < XCHAL_MPU_WORST_CASE_ENTRIES_FOR_REGION) - remove_inaccessible_entries(fg, Xthal_mpu_bgmap); -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - // First we create foreground entries that 'duplicate' background entries to aide in - // maintaining proper alignment. - if ((rv = create_aligning_entries_if_required(fg, Xthal_mpu_bgmap, first)) != XTHAL_SUCCESS) - return rv; - - // First we write the terminating entry for our region - // 5 cases: - // 1) end is at the end of the address space, then we don't need to do anything ... takes 0 entries - // 2) There is an existing entry at end ... another nop ... 0 entries - // 3) end > than any existing entry ... in this case we just create a new invalid entry at end to mark - // end of the region. No problem with alignment ... this takes 1 entry - // 4) otherwise if there is a background map boundary between end and x ,the smallest existing entry that is - // greater than end, then we first create an equivalent foreground map entry for the background map entry that immediately - // precedes x, and then we write an invalid entry for end. Takes 2 entries - // 5) otherwise x is in the same background map entry as end, in this case we write a new foreground entry with the existing - // attributes at end - - if (last == 0xFFFFFFFF) - { /* the end is the end of the address space ... do nothing */ - } - else - { - x = smallest_entry_greater_than_equal(fg, last); - if (last == x) - { /* another nop */ - } - else if (last > x) - { /* there is no entry that has a start after the new region ends - ... 
we handle this by creating an invalid entry at the end point */ - if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, last, 1)) != XTHAL_SUCCESS) - { -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - return rv; - } -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - } - else - { - pbg = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[get_bg_map_index(Xthal_mpu_bgmap, x)]); - /* so there is an existing entry we must deal with. We next need to find - * if there is an existing background entry in between the end of - * the new region and beginning of the next. - */ - if ((pbg != x) && (pbg > last)) - { - /* okay ... there is an intervening background map entry. We need - * to handle this by inserting an aligning entry (if the architecture requires it) - * and then placing writing an invalid entry at end. - */ - if (XCHAL_MPU_ALIGN_REQ) - { - if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, pbg, 0)) != XTHAL_SUCCESS) - { -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - return rv; - } -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - } - if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, last, 1)) != XTHAL_SUCCESS) - { -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - return rv; - } -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - } - else - /* ok so there are no background map entry in between end and x, in this case - * we just need to create a new entry at end writing the existing attributes. - */ - if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, last, 1)) != XTHAL_SUCCESS) - { -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - return rv; - } -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - } - } - - /* last, but not least we need to insert a entry at the starting address for our new region */ - if ((rv = insert_entry_if_needed_with_existing_attr(fg, Xthal_mpu_bgmap, start_initial_region(fg, Xthal_mpu_bgmap, first, last), 0)) - != XTHAL_SUCCESS) - { -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - return rv; - } -#if MPU_DEVELOPMENT_MODE - assert_maps_equivalent(on_entry, fg, Xthal_mpu_bgmap); -#endif - } - // up to this point, the attributes of every byte in the address space should be the same as when this function - // was called. - safe_and_commit_overlaped_regions(fg, Xthal_mpu_bgmap, first, last, memoryType, accessRights, writeback, invalidate); - - assert_map_valid(); - return XTHAL_SUCCESS; -} - -// checks if x (full 32bit) is mpu_aligned for MPU -static unsigned int mpu_aligned(unsigned x) -{ - return !(x & MPU_ALIGNMENT_MASK); -} - -static unsigned int mpu_align(unsigned int x, unsigned int roundUp) -{ - if (roundUp) - return (x + MPU_ALIGNMENT_MASK) & MPU_ADDRESS_MASK; - else - return (x & MPU_ADDRESS_MASK); -} - -#endif - -#if defined(__SPLIT__mpu_check) -static int bad_accessRights(unsigned ar) -{ - if (ar == 0 || (ar >= 4 && ar <= 15)) - return 0; - else - return 1; -} - -/* this function checks if the supplied map 'fg' is a valid MPU map using 3 criteria: - * 1) if an entry is valid, then that entries accessRights must be defined (0 or 4-15). - * 2) The map entries' 'vStartAddress's must be in increasing order. 
- * 3) If the architecture requires background map alignment then: - * a) If entry0's 'vStartAddress' field is nonzero, then that field must equal - * one of the Background Map's 'vStartAddress' field values if the entry 0's valid bit is set. - * b) If entryN's 'valid' bit is 0 and entry[N+1]'s 'valid' bit is 1, then - * entry[N+1]'s 'vStartAddress' field must equal one of the Background Map's 'vStartAddress' field values. - * - * This function returns XTHAL_SUCCESS if the map satisfies the condition, otherwise it returns - * XTHAL_BAD_ACCESS_RIGHTS, XTHAL_OUT_OF_ORDER_MAP, or XTHAL_MAP_NOT_ALIGNED. - * - */ -static int check_map(const xthal_MPU_entry* fg, unsigned int n, const xthal_MPU_entry* bg) -{ - int i; - unsigned current = 0; - if (!n) - return XTHAL_SUCCESS; - if (n > XCHAL_MPU_ENTRIES) - return XTHAL_OUT_OF_ENTRIES; - for (i = 0; i < n; i++) - { - if (XTHAL_MPU_ENTRY_GET_VALID(fg[i]) && bad_accessRights(XTHAL_MPU_ENTRY_GET_ACCESS(fg[i]))) - return XTHAL_BAD_ACCESS_RIGHTS; - if ((XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) < current)) - return XTHAL_OUT_OF_ORDER_MAP; - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]) & MPU_VSTART_CORRECTNESS_MASK) - return XTHAL_MAP_NOT_ALIGNED; - current = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i]); - } - if (XCHAL_MPU_ALIGN_REQ && XTHAL_MPU_ENTRY_GET_VALID(fg[0]) && XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[0]) - && !_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[0]), bg)) - return XTHAL_MAP_NOT_ALIGNED; - for (i = 0; i < n- 1; i++) - if (XCHAL_MPU_ALIGN_REQ && !XTHAL_MPU_ENTRY_GET_VALID(fg[i]) && XTHAL_MPU_ENTRY_GET_VALID(fg[i + 1]) - && !_xthal_in_bgmap(XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[i + 1]), bg)) - return XTHAL_MAP_NOT_ALIGNED; - return XTHAL_SUCCESS; -} - - - -/* - * this function checks that the bit-wise or-ed XTHAL_MEM_... bits in x correspond to a valid - * MPU memoryType. If x is valid, then 0 is returned, otherwise XTHAL_BAD_MEMORY_TYPE is - * returned. - */ -static int check_memory_type(unsigned x) -{ - unsigned system_cache_type = _XTHAL_MEM_CACHE_MASK(x); - unsigned processor_cache_type = (((x) & _XTHAL_LOCAL_CACHE_BITS) >> 4); - if ((system_cache_type > XTHAL_MEM_NON_CACHEABLE) || (processor_cache_type > XTHAL_MEM_NON_CACHEABLE)) - return XTHAL_BAD_MEMORY_TYPE; - int processor_cache_type_set = 1; - if (!processor_cache_type) - { - processor_cache_type = system_cache_type << 4; - processor_cache_type_set = 0; - } - unsigned device = _XTHAL_MEM_IS_DEVICE(x); - unsigned system_noncacheable = _XTHAL_IS_SYSTEM_NONCACHEABLE(x); - - if (device | system_noncacheable) - { - if ((system_cache_type || processor_cache_type_set) && device) - return XTHAL_BAD_MEMORY_TYPE; - if (processor_cache_type_set) - return XTHAL_BAD_MEMORY_TYPE; // if memory is device or non cacheable, then processor cache type should not be set - if (system_noncacheable && (x & XTHAL_MEM_INTERRUPTIBLE)) - return XTHAL_BAD_MEMORY_TYPE; - { - unsigned z = x & XTHAL_MEM_SYSTEM_SHAREABLE; - if ((z == XTHAL_MEM_INNER_SHAREABLE) || (z == XTHAL_MEM_OUTER_SHAREABLE)) - return XTHAL_BAD_MEMORY_TYPE; - } - } - else - { - if ((x & XTHAL_MEM_SYSTEM_SHAREABLE) == XTHAL_MEM_SYSTEM_SHAREABLE) - return XTHAL_BAD_MEMORY_TYPE; - if ((x & (XTHAL_MEM_BUFFERABLE | XTHAL_MEM_INTERRUPTIBLE))) - return XTHAL_BAD_MEMORY_TYPE; - } - - return 0; -} -#endif - -#endif // is MPU - -#if defined(__SPLIT__mpu_basic) -/* - * These functions accept encoded access rights, and return 1 if the supplied memory type has the property specified by the function name. 
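/* A short usage sketch for the predicates declared below ('p' is an arbitrary
 * address; assumes the usual <xtensa/hal.h> declarations):
 */
static int kernel_only_writable(void *p)
{
    xthal_MPU_entry e = xthal_get_entry_for_address(p, 0);
    int ar = XTHAL_MPU_ENTRY_GET_ACCESS(e);

    /* predicates return 1, 0, or XTHAL_BAD_ACCESS_RIGHTS */
    return xthal_is_kernel_writeable(ar) == 1 && xthal_is_user_writeable(ar) == 0;
}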
- */ -extern int xthal_is_kernel_readable(int accessRights) -{ -#if XCHAL_HAVE_MPU - return is_kernel_readable(accessRights); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -extern int xthal_is_kernel_writeable(int accessRights) -{ -#if XCHAL_HAVE_MPU - return is_kernel_writeable(accessRights); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -extern int xthal_is_kernel_executable(int accessRights) -{ -#if XCHAL_HAVE_MPU - return is_kernel_executable(accessRights); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -extern int xthal_is_user_readable(int accessRights) -{ -#if XCHAL_HAVE_MPU - return is_user_readable(accessRights); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -extern int xthal_is_user_writeable(int accessRights) -{ -#if XCHAL_HAVE_MPU - return is_user_writeable(accessRights); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -extern int xthal_is_user_executable(int accessRights) -{ -#if XCHAL_HAVE_MPU - return is_user_executable(accessRights); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -/* - * These functions accept either an encoded or unencoded memory type, and - * return 1 if the supplied memory type has property specified by the - * function name. - */ -int xthal_is_cacheable(unsigned int mt) -{ -#if XCHAL_HAVE_MPU - return is_cacheable(mt); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -int xthal_is_writeback(unsigned int mt) -{ -#if XCHAL_HAVE_MPU - return is_writeback(mt); -#else - return XTHAL_UNSUPPORTED; -#endif -} - -int xthal_is_device(unsigned int mt) -{ -#if XCHAL_HAVE_MPU - return is_device(mt); -#else - return XTHAL_UNSUPPORTED; -#endif -} -#endif - -/* - * This function converts a bit-wise combination of the XTHAL_MEM_.. constants - * to the corresponding MPU memory type (9-bits). - * - * If none of the XTHAL_MEM_.. bits are present in the argument, then - * bits 4-12 (9-bits) are returned ... this supports using an already encoded - * memoryType (perhaps obtained from an xthal_MPU_entry structure) as input - * to xthal_set_region_attribute(). - * - * This function first checks that the supplied constants are a valid and - * supported combination. If not, it returns XTHAL_BAD_MEMORY_TYPE. - */ -#if defined(__SPLIT__mpu_check) -int xthal_encode_memory_type(unsigned int x) -{ -#if XCHAL_HAVE_MPU - const unsigned int MemoryTypeMask = 0x1ff0; - const unsigned int MemoryFlagMask = 0xffffe000; - /* - * Encodes the memory type bits supplied in an | format (XCHAL_CA_PROCESSOR_CACHE_WRITEALLOC | XCHAL_CA_PROCESSOR_CACHE_WRITEBACK) - */ - unsigned memoryFlags = x & MemoryFlagMask; - if (!memoryFlags) - return (x & MemoryTypeMask) >> XTHAL_AR_WIDTH; - else - { - int chk = check_memory_type(memoryFlags); - if (chk < 0) - return chk; - else - return XTHAL_ENCODE_MEMORY_TYPE(memoryFlags); - } -#else - return XTHAL_UNSUPPORTED; -#endif -} -#endif - -#if defined(__SPLIT__mpu_rmap) - -/* - * Copies the current MPU entry list into 'entries' which - * must point to available memory of at least - * sizeof(xthal_MPU_entry) * XCHAL_MPU_ENTRIES. - * - * This function returns XTHAL_SUCCESS. - * XTHAL_INVALID, or - * XTHAL_UNSUPPORTED. - */ -int xthal_read_map(xthal_MPU_entry* fg_map) -{ -#if XCHAL_HAVE_MPU - unsigned i; - if (!fg_map) - return XTHAL_INVALID; - xthal_read_map_raw(fg_map); - return XTHAL_SUCCESS; -#else - return XTHAL_UNSUPPORTED; -#endif -} - -#if XCHAL_HAVE_MPU - #undef XCHAL_MPU_BGMAP - #define XCHAL_MPU_BGMAP(s,vstart,vend,rights,mtype,x...) 
XTHAL_MPU_ENTRY(vstart,1,rights,mtype), -const xthal_MPU_entry Xthal_mpu_bgmap[] = { XCHAL_MPU_BACKGROUND_MAP(0) }; -#endif - - -/* - * Copies the MPU background map into 'entries' which must point - * to available memory of at least - * sizeof(xthal_MPU_entry) * XCHAL_MPU_BACKGROUND_ENTRIES. - * - * This function returns XTHAL_SUCCESS. - * XTHAL_INVALID, or - * XTHAL_UNSUPPORTED. - */ -int xthal_read_background_map(xthal_MPU_entry* bg_map) -{ -#if XCHAL_HAVE_MPU - if (!bg_map) - return XTHAL_INVALID; - memcpy(bg_map, Xthal_mpu_bgmap, sizeof(Xthal_mpu_bgmap)); - return XTHAL_SUCCESS; -#else - return XTHAL_UNSUPPORTED; -#endif -} -#endif -/* - * Writes the map pointed to by 'entries' to the MPU. Before updating - * the map, it commits any uncommitted - * cache writes, and invalidates the cache if necessary. - * - * This function does not check for the correctness of the map. Generally - * xthal_check_map() should be called first to check the map. - * - * If n == 0 then the existing map is cleared, and no new map is written - * (useful for returning to reset state) - * - * If (n > 0 && n < XCHAL_MPU_ENTRIES) then a new map is written with - * (XCHAL_MPU_ENTRIES-n) padding entries added to ensure a properly ordered - * map. The resulting foreground map will be equivalent to the map vector - * fg, but the position of the padding entries should not be relied upon. - * - * If n == XCHAL_MPU_ENTRIES then the complete map as specified by fg is - * written. - * - * xthal_write_map() disables the MPU foreground map during the MPU - * update and relies on the background map. - * - * As a result any interrupt that does not meet the following conditions - * must be disabled before calling xthal_write_map(): - * 1) All code and data needed for the interrupt must be - * mapped by the background map with sufficient access rights. - * 2) The interrupt code must not access the MPU. - * - */ -#if defined(__SPLIT__mpu_wmap) -void xthal_write_map(const xthal_MPU_entry* fg, unsigned int n) -{ -#if XCHAL_HAVE_MPU - unsigned int cacheadrdis = xthal_calc_cacheadrdis(fg, n); - xthal_dcache_all_writeback_inv(); - xthal_icache_all_invalidate(); - xthal_write_map_raw(fg, n); - write_cacheadrdis(cacheadrdis); - isync(); // ditto -#endif -} -#endif - -#if defined(__SPLIT__mpu_check) -/* - * Checks if entry vector 'fg' of length 'n' is a valid MPU access map. - * Returns: - * XTHAL_SUCCESS if valid, - * XTHAL_OUT_OF_ENTRIES - * XTHAL_MAP_NOT_ALIGNED, - * XTHAL_BAD_ACCESS_RIGHTS, - * XTHAL_OUT_OF_ORDER_MAP, or - * XTHAL_UNSUPPORTED if config doesn't have an MPU. - */ -int xthal_check_map(const xthal_MPU_entry* fg, unsigned int n) -{ -#if XCHAL_HAVE_MPU - return check_map(fg, XCHAL_MPU_ENTRIES, Xthal_mpu_bgmap); -#else - return XTHAL_UNSUPPORTED; -#endif -} -#endif - -#if defined(__SPLIT__mpu_basic) -/* - * Returns the MPU entry that maps 'vaddr'. If 'infgmap' is non-NULL then it is - * set to 1 if 'vaddr' is mapped by the foreground map, or 0 if 'vaddr' - * is mapped by the background map. 
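/* A usage sketch for the whole-map calls above (xthal_read_map(),
 * xthal_check_map(), xthal_write_map()); the entry written here is purely
 * illustrative.  Interrupts not fully covered by the background map must be
 * masked around xthal_write_map(), as noted above.
 */
static void example_rewrite_map(void)
{
    xthal_MPU_entry map[XCHAL_MPU_ENTRIES];

    xthal_read_map(map);
    /* e.g. make the lowest region RWX and uncached (illustrative only) */
    map[0] = XTHAL_MPU_ENTRY(0x00000000, 1, XTHAL_AR_RWXrwx,
                             XTHAL_ENCODE_MEMORY_TYPE(XCHAL_CA_BYPASS));
    if (xthal_check_map(map, XCHAL_MPU_ENTRIES) == XTHAL_SUCCESS)
        xthal_write_map(map, XCHAL_MPU_ENTRIES);
}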
- */ -extern xthal_MPU_entry xthal_get_entry_for_address(void* paddr, int* infgmap) - { -#if XCHAL_HAVE_MPU - xthal_MPU_entry e; - unsigned int p; - __asm__ __volatile__("PPTLB %0, %1\n\t" : "=a" (p) : "a" (paddr)); - if ((p & 0x80000000)) - { - if (infgmap) - *infgmap = 1; - e.at = (p & 0x1fffff); - __asm__ __volatile__("RPTLB0 %0, %1\n\t" : "=a" (e.as) : "a" (p & 0x1f)); - return e; - } - else - { - int i; - if (infgmap) - *infgmap = 0; - for (i = XCHAL_MPU_BACKGROUND_ENTRIES - 1; i > 0; i--) - { - if (XTHAL_MPU_ENTRY_GET_VSTARTADDR(Xthal_mpu_bgmap[i]) <= (unsigned) paddr) - { - return Xthal_mpu_bgmap[i]; - } - } // in background map - return Xthal_mpu_bgmap[0]; - } -#else - xthal_MPU_entry e; - return e; -#endif - } -#endif -/* - * This function is intended as an MPU specific version of - * xthal_set_region_attributes(). xthal_set_region_attributes() calls - * this function for MPU configurations. - * - * This function sets the attributes for the region [vaddr, vaddr+size) - * in the MPU. - * - * Depending on the state of the MPU this function will require from - * 0 to 3 unused MPU entries. - * - * This function typically will move, add, and subtract entries from - * the MPU map during execution, so that the resulting map may - * be quite different than when the function was called. - * - * This function does make the following guarantees: - * 1) The MPU access map remains in a valid state at all times - * during its execution. - * 2) At all points during (and after) completion the memoryType - * and accessRights remain the same for all addresses - * that are not in the range [vaddr, vaddr+size). - * 3) If XTHAL_SUCCESS is returned, then the range - * [vaddr, vaddr+size) will have the accessRights and memoryType - * specified. - * - * The accessRights parameter should be either a 4-bit value corresponding - * to an MPU access mode (as defined by the XTHAL_AR_.. constants), or - * XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS. - * - * The memoryType parameter should be either a bit-wise or-ing of XTHAL_MEM_.. - * constants that represent a valid MPU memoryType, a 9-bit MPU memoryType - * value, or XTHAL_MPU_USE_EXISTING_MEMORY_TYPE. - * - * In addition to the error codes that xthal_set_region_attribute() - * returns, this function can also return: XTHAL_BAD_ACCESS_RIGHTS - * (if the access rights bits map to an unsupported combination), or - * XTHAL_OUT_OF_ENTRIES (if there are not enough unused MPU entries). - * - * If this function is called with an invalid MPU map, then this function - * will return one of the codes that is returned by xthal_check_map(). - * - * The flag, XTHAL_CAFLAG_EXPAND, is not supported. 
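/* A hedged usage sketch of the call documented above ('buf' and 'len' are
 * hypothetical): make a buffer uncacheable while keeping whatever access
 * rights it already has.  A flags value of 0 means the region may be widened
 * to MPU alignment boundaries and the automatic writeback/invalidate is
 * performed.
 */
static int make_uncached(void *buf, unsigned len)
{
    return xthal_mpu_set_region_attribute(buf, len,
                                          XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS,
                                          XTHAL_MEM_NON_CACHEABLE,
                                          0);
}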
- * - */ -#if defined(__SPLIT__mpu_attributes) -int xthal_mpu_set_region_attribute(void* vaddr, unsigned size, int accessRights, int memoryType, unsigned flags) -{ -#if XCHAL_HAVE_MPU - unsigned int first; - unsigned int last; - int rv; - - if (flags & XTHAL_CAFLAG_EXPAND) - return XTHAL_UNSUPPORTED; - if (size == 0) - return XTHAL_ZERO_SIZED_REGION; - first = (unsigned) vaddr; - last = first + size; - if (last != 0xFFFFFFFF) - last--; - if (first >= last) - return XTHAL_INVALID_ADDRESS_RANGE; // Wraps around - - if (accessRights & XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS) - { - accessRights = XTHAL_MPU_ENTRY_GET_ACCESS(xthal_get_entry_for_address(vaddr, 0)); - } - else - { - accessRights = encode_access_rights(accessRights); - if (accessRights < 0) - return XTHAL_BAD_ACCESS_RIGHTS; - } - if (memoryType & XTHAL_MPU_USE_EXISTING_MEMORY_TYPE) - { - memoryType = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(xthal_get_entry_for_address(vaddr, 0)); - } - else - { - if (memoryType & 0xffffe000) // Tests if any of the XTHAL MEM flags are present - memoryType = xthal_encode_memory_type(memoryType); - else - if (memoryType & 0xfffffe00) // Tests if any of bits from 9 to 13 are set indicating - // that the memoryType was improperly shifted - // we flag this as an error - return XTHAL_BAD_MEMORY_TYPE; - if (memoryType < 0) - return XTHAL_BAD_MEMORY_TYPE; - } - if (flags & XTHAL_CAFLAG_EXACT) - if (!mpu_aligned(first) || !mpu_aligned(last + 1)) - return XTHAL_INEXACT; - - first = mpu_align(first, (flags & XTHAL_CAFLAG_NO_PARTIAL)); - if (last != 0xffffffff) - { - last = mpu_align(last + 1, !(flags & XTHAL_CAFLAG_NO_PARTIAL)); - if (first >= last) - return ((flags & XTHAL_CAFLAG_NO_PARTIAL) ? XTHAL_ZERO_SIZED_REGION : 0); - } - rv = safe_add_region(first, last, accessRights, memoryType, !(flags & XTHAL_CAFLAG_NO_AUTO_WB), - !(flags & XTHAL_CAFLAG_NO_AUTO_INV)); - isync(); - return rv; -#else - return XTHAL_UNSUPPORTED; -#endif -} -#endif - - -#if defined(__SPLIT__mpu_cachedis) - -inline static unsigned int max2(unsigned int a, unsigned int b) - { - if (a>b) - return a; - else - return b; - } - -inline static unsigned int mask_cachedis(unsigned int current, int first_region, - int last_region) - { - unsigned int x; - x = ((1 << (last_region - first_region + 1)) - 1) << first_region; - current &= ~x; - return current; - } - -/* - * xthal_calc_cacheadrdis() computes the value that should be written - * to the CACHEADRDIS register. The return value has bits 0-7 set according as: - * bit n: is zero if any part of the region [512MB * n, 512MB* (n-1)) is cacheable. - * is one if NO part of the region [512MB * n, 512MB* (n-1)) is cacheable. - * - * This function looks at both the loops through both the foreground and background maps - * to find cacheable area. Once one cacheable area is found in a 512MB region, then we - * skip to the next 512MB region. 
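/* Sketch of how xthal_calc_cacheadrdis() is typically used (it mirrors the
 * call made in safe_and_commit_overlaped_regions()): bit n of the result is 1
 * only when no byte of the n-th 512MB region [n*512MB, (n+1)*512MB) is
 * cacheable.
 */
static unsigned recompute_cacheadrdis(void)
{
    xthal_MPU_entry fg[XCHAL_MPU_ENTRIES];

    xthal_read_map(fg);
    /* the HAL itself writes this value to CACHEADRDIS (write_cacheadrdis())
     * after map updates; here it is computed only for inspection */
    return xthal_calc_cacheadrdis(fg, XCHAL_MPU_ENTRIES);
}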
- */ -unsigned int xthal_calc_cacheadrdis(const xthal_MPU_entry* fg, unsigned int num_entries) - { -#if XCHAL_HAVE_MPU - unsigned int cachedis = 0xff; - int fg_index = num_entries - 1; - int bg_index = XCHAL_MPU_BACKGROUND_ENTRIES - 1; - int working_region = 7; - int ending_region; - unsigned int vaddr = 0xffffffff; - while (bg_index >= 0 || fg_index >= 0) - { - if ((fg_index >= 0 && XTHAL_MPU_ENTRY_GET_VALID(fg[fg_index]))) - { - vaddr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[fg_index]); - ending_region = vaddr >> 29; - if (ending_region <= working_region) - { - unsigned int mt = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(fg[fg_index]); - if (is_cacheable(mt)) - { - cachedis = mask_cachedis(cachedis, ending_region, - working_region); - /* Optimize since we have found one cacheable entry in the region ... no need to look for more */ - if (ending_region == 0) - return cachedis; - else - working_region = ending_region - 1; - } - else - if (vaddr & 0x1fffffff) // If vaddr is on a 512MB region we want to move to the next region - working_region = ending_region; - else - working_region = ending_region - 1; - } - } - else if ((bg_index >= 0) - && ((fg_index <= 0) - || XTHAL_MPU_ENTRY_GET_VALID(fg[fg_index-1]))&& vaddr) - { - unsigned int caddr; - unsigned int low_addr = ( - (fg_index >= 0) ? - (XTHAL_MPU_ENTRY_GET_VSTARTADDR(fg[fg_index])) : - 0); - /* First skip any background entries that start after the address of interest */ - while ((caddr = XTHAL_MPU_ENTRY_GET_VSTARTADDR(Xthal_mpu_bgmap[bg_index])) >= vaddr) - bg_index--; - do - { - caddr = max2(XTHAL_MPU_ENTRY_GET_VSTARTADDR(Xthal_mpu_bgmap[bg_index]), - low_addr); - ending_region = caddr >> 29; - if (ending_region <= working_region) - { - unsigned int mt = XTHAL_MPU_ENTRY_GET_MEMORY_TYPE( - Xthal_mpu_bgmap[bg_index]); - if (is_cacheable(mt)) - { - cachedis = mask_cachedis(cachedis, ending_region, - working_region); - /* Optimize since we have found one cacheable entry in the region ... - * no need to look for more */ - if (ending_region == 0) - return cachedis; // we are done - else - working_region = ending_region - 1; - } - else - if (caddr & 0x1fffffff) - working_region = ending_region; - else - working_region = ending_region - 1; - } - bg_index--; - }while (caddr > low_addr); - vaddr = caddr; - } - fg_index--; - if (!vaddr) - break; - } - return cachedis; -#else - return 0; -#endif - } -#endif - -#if defined(__SPLIT__mpu_basic) -void (*_xthal_assert_handler)(); -/* Undocumented internal testing function */ -extern void _xthal_set_assert_handler(void (*handler)()) -{ -#if XCHAL_HAVE_MPU - _xthal_assert_handler = handler; -#endif -} -#endif diff --git a/src/arch/xtensa/hal/mpu_asm.S b/src/arch/xtensa/hal/mpu_asm.S deleted file mode 100644 index a2a544bd354d..000000000000 --- a/src/arch/xtensa/hal/mpu_asm.S +++ /dev/null @@ -1,78 +0,0 @@ -// -// mpu_asm.S - Assembler routine(s) for the MPU -// -// $Id$ - -// Copyright (c) 2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include - -#if defined(__SPLIT__write_map_raw) ||\ - defined(__SPLIT__write_map_raw_nw) - -/* - void xthal_write_map_raw( const struct xthal_MPU_entry* map, unsigned n); - - Updates the MPU with the MPU entries provided: - map pointer to array of MPU entries - n number of entries in array (0 <= n <= XCHAL_MPU_ENTRIES) - - The entries provided must have monotonically increasing addresses. - This function otherwise orders its updates to ensure the MPU always has - all its entries in monotonically increasing sequence. - - on entry - a2 => vector of MPU entries to write - a3 => number of entries to write - a4-a7 => destroyed -*/ - -DECLFUNC (xthal_write_map_raw) - abi_entry - mpu_write_map a2, a3, a4, a5, a6, a7 - abi_return - endfunc - -#endif - -/* - void xthal_read_map_raw(struct xthal_MPU_entry* map); - - Reads the current map from the MPU and puts it the vector - pointed to by map. - - a2 => vector of MPU entries where the current MPU state is copied - a3-a4 => destroyed -*/ - -#if defined(__SPLIT__read_map_raw) ||\ - defined(__SPLIT__read_map_raw_nw) - -DECLFUNC (xthal_read_map_raw) - abi_entry - mpu_read_map a2, a3, a4 - abi_return - endfunc - -#endif - diff --git a/src/arch/xtensa/hal/set_region_translate.c b/src/arch/xtensa/hal/set_region_translate.c deleted file mode 100644 index 27ed6b80a50b..000000000000 --- a/src/arch/xtensa/hal/set_region_translate.c +++ /dev/null @@ -1,534 +0,0 @@ -/* - * Copyright (c) 2004-2014 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#include - -#if XCHAL_HAVE_XEA2 && (!XCHAL_HAVE_MPU) -/* - * C-stubs to issue the tlb related instructions (with dsync and isync's if needed). 
- * - */ -static inline void write_dtlb_entry(unsigned vpn_way, unsigned ppn_ca) { - __asm__ __volatile__("wdtlb %1, %0; dsync\n\t" - : : "r" (vpn_way), "r" (ppn_ca) ); -} - -static inline void write_itlb_entry(unsigned vpn_way, unsigned ppn_ca) { - __asm__ __volatile__("witlb %1, %0; isync\n\t" - : : "r" (vpn_way), "r" (ppn_ca) ); -} - -static inline unsigned read_dtlb1_entry(unsigned addr) { - unsigned long tmp; - __asm__ __volatile__("rdtlb1 %0, %1\n\t" - : "=a" (tmp) - : "a" (addr)); - return tmp; -} - -static inline unsigned read_itlb1_entry(unsigned addr) { - unsigned long tmp; - __asm__ __volatile__("ritlb1 %0, %1\n\t" - : "=a" (tmp) - : "a" (addr)); - return tmp; -} - -static inline unsigned probe_dtlb(unsigned addr) { - unsigned long tmp; - __asm__ __volatile__("pdtlb %0, %1\n\t" - : "=a" (tmp) - : "a" (addr)); - return tmp; -} - -static inline unsigned probe_itlb(unsigned addr) { - unsigned long tmp; - __asm__ __volatile__("pitlb %0, %1\n\t" - : "=a" (tmp) - : "a" (addr)); - return tmp; -} - -static inline void invalidate_dtlb_entry(unsigned addr) { - __asm__ __volatile__("idtlb %0; dsync \n\t" - : : "a" (addr)); -} - -static inline void invalidate_itlb_entry(unsigned addr) { - __asm__ __volatile__("iitlb %0 ; isync\n\t" - : : "a" (addr)); -} - -static inline unsigned read_dtlbcfg() { - unsigned long tmp; - __asm__ __volatile__("rsr.dtlbcfg %0\n\t" - : "=a" (tmp)); - return tmp; -} - -static inline unsigned read_itlbcfg() { - unsigned long tmp; - __asm__ __volatile__("rsr.itlbcfg %0\n\t" - : "=a" (tmp)); - return tmp; -} - -#endif - -/* - * xthal_set_region_translation_raw is a quick and simple function - * to set both physical address and cache attribute for - * a 512MB region at . - * - * Parameters: - * void* vaddr 512MB aligned pointer representing the start of virtual address region - * void* paddr 512MB aligned pointer representing the start of physical address region - * unsigned cattr 4 bit value encoding the caching properties and rights (MMU only). - * - * returns 0 (XCHAL_SUCCESS) if successful - * returns non zero (XCHAL_UNSUPPORTED) on failure - * - * This function has the following limitations: - * - * 1) Requires either the Region Translation Option or a v3 MMU running in the default mode (with spanning way) - * 2) It does no error checking. - * 3) Deals with one 512MB region (vaddr and paddr are required to be 512MB aligned although that is not explicitly checked) - * 4) It requires the caller to do any cache flushing that is needed - * 5) Doesn't support mnemonically setting the 'rights' (rwx, rw, ... 
) bit on the MMU - * 6) It is illegal to change the mapping of the region containing the current PC (not checked) - * - */ -int xthal_set_region_translation_raw(void *vaddr, void *paddr, unsigned cattr) { -#if XCHAL_HAVE_MPU - return XTHAL_UNSUPPORTED; -#else -#if XCHAL_HAVE_XEA2 -#if XCHAL_HAVE_XLT_CACHEATTR || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) -# if XCHAL_HAVE_XLT_CACHEATTR - unsigned vpn_way = (unsigned)vaddr; -# else - unsigned vpn_way = ((unsigned) vaddr & 0xFFFFFFF0) + XCHAL_SPANNING_WAY; -# endif - unsigned ppn_ca = ((unsigned) paddr & 0xFFFFFFF0) + (cattr & 0xF); - write_dtlb_entry(vpn_way, ppn_ca); - write_itlb_entry(vpn_way, ppn_ca); - return XTHAL_SUCCESS; -#else - return XTHAL_UNSUPPORTED; -#endif -#else - return XTHAL_UNSUPPORTED; -#endif -#endif -} - -/* - * xthal_v2p() takes a virtual address as input, and if that virtual address is mapped to a physical address - * by the MMU, it returns the: - * a) corresponding physical address - * b) the tlb way that is used to translate the address - * c) cache attribute for translation - * - * Parameters: - * void* vaddr A pointer representing the virtual address (there are no alignment requirements for this address) - * void** paddr This value can be 0, or can point to a pointer variable which will be updated to contain the physical address - * unsigned* way This value can be 0, or can point to an unsigned variable which will be updated to contain the TLB way. - * unsigned* cattr This value can be 0, or can point to an unsigned variable which will be updated to contain the cache attr - * For MPU configurations bits 0..3 hold the access rights and bits 4..8 hold the encoded memory type - * - * Returns 0 (XCHAL_SUCCESS) if successful - * XTHAL_NO_MAPPING if there is no current mapping for the virtual address - * XCHAL_UNSUPPORTED if unsupported - * - * Limitations: - * Assumes that architecture variable DVARWAY56 is "Variable" - * Uses the D-TLBS for the translation ... 
assumption is that ITLB's have same mappings - */ -int xthal_v2p(void* vaddr, void** paddr, unsigned *way, unsigned* cattr) { -#if XCHAL_HAVE_XEA2 -#if XCHAL_HAVE_MPU - if (paddr) - *paddr = vaddr; - if (way) - *way = 0; - if (cattr) - { - struct xthal_MPU_entry x = xthal_get_entry_for_address(vaddr, 0); - *cattr = XTHAL_MPU_ENTRY_GET_ACCESS(x) | XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(x) << XTHAL_AR_WIDTH; - } - return XTHAL_SUCCESS; -#else - unsigned long probe = probe_dtlb((unsigned) vaddr); -#if !XCHAL_HAVE_PTP_MMU - if (!(0x1 & probe)) - return XTHAL_NO_MAPPING; - if (way) - *way = 1; - if (paddr || cattr) { - unsigned long temp; - temp = read_dtlb1_entry(probe); - unsigned ppn = 0xe0000000 & temp; - unsigned att = 0xf & temp; - if (paddr) - *paddr = ((void*) (ppn + (((unsigned) vaddr) & 0x1fffffff))); - if (cattr) - *cattr = att; - } -#else - { - unsigned iway; - if (!(0x10 & probe)) - return XTHAL_NO_MAPPING; - iway = 0xf & probe; - if (way) - *way = iway; - if (paddr || cattr) { - unsigned temp; - unsigned ppn; - unsigned ppn1; - unsigned dtlbcfg = read_dtlbcfg(); - temp = read_dtlb1_entry(probe); - unsigned att = 0xf & temp; - if (cattr) - *cattr = att; - if (paddr) - switch (iway) // followin code derived from fig 4-40 from ISA MMU Option Data (at) Format for RxTLB1 - { /* 4k pages */ - case 0: - case 1: - case 2: - case 3: - case 7: - case 8: - case 9: - ppn = 0xfffff000; // 4k pages - break; - case 4: { - switch ((dtlbcfg & (0x3 << 16)) >> 16) // bits 16 & 17 - { - case 0: // 1MB pages - ppn = 0xfff00000; - break; - case 1: // 4MB pages - ppn = 0xffc00000; - break; - case 2: // 16MB pages - ppn = 0xff000000; - break; - case 3: // 64MB pages - ppn = 0xfc000000; - break; - default: - return XTHAL_UNSUPPORTED; - } - } - break; - case 5: - if ((dtlbcfg & (1 << 20))) - ppn = 0xf8000000; // 128MB pages - else - ppn = 0xf0000000; // 256MB pages - break; - case 6: - if ((dtlbcfg & (1 << 24))) - ppn = 0xe0000000; // 512MB pages - else - ppn = 0xf0000000; // 256MB pages - break; - default: - return XTHAL_UNSUPPORTED; - break; - } - ppn1 = ppn & temp; - *paddr = ((void*) (ppn1 + (((unsigned) vaddr) & (~ppn)))); - } - } -#endif - return XTHAL_SUCCESS; -#endif -#else - return XTHAL_UNSUPPORTED; -#endif -} - -/* these constants borrowed from xthal_set_region_attribute */ -# if XCHAL_HAVE_PTP_MMU -# define CA_BYPASS XCHAL_CA_BYPASS -# define CA_WRITETHRU XCHAL_CA_WRITETHRU -# define CA_WRITEBACK XCHAL_CA_WRITEBACK -# define CA_WRITEBACK_NOALLOC XCHAL_CA_WRITEBACK_NOALLOC -# define CA_ILLEGAL XCHAL_CA_ILLEGAL -# else -/* Hardcode these, because they get remapped when caches or writeback not configured: */ -# define CA_BYPASS 2 -# define CA_WRITETHRU 1 -# define CA_WRITEBACK 4 -# define CA_WRITEBACK_NOALLOC 5 -# define CA_ILLEGAL 15 -# endif - -/* internal function that returns 1 if the supplied attr indicates the - * cache is in writeback mode. - */ -static inline int is_writeback(unsigned attr) { -#if XCHAL_HAVE_XLT_CACHEATTR - return attr == CA_WRITEBACK || attr == CA_WRITEBACK_NOALLOC; -#endif -#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY - return (attr | 0x3) == CA_WRITEBACK; -#endif - return -1; /* unsupported */ -} - -/* - * xthal_set_region_translation() - * - * Establishes a new mapping (with the supplied cache attributes) - * between a virtual address region, and a physical address region. 
- * - * This function is only supported with following processor configurations: - * a) Region Translation - * b) v3 MMU with a spanning way running in the default mode - * - * If the specified memory range exactly covers a series - * of consecutive 512 MB regions, the address mapping and cache - * attributes of these regions are updated. - * - * If this is not the case, e.g. if either or both the - * start and end of the range only partially cover a 512 MB - * region, one of three results are possible: - * - * 1. By default, the cache attribute of all regions - * covered, even just partially, is changed to - * the requested attribute. - * - * 2. If the XTHAL_CAFLAG_EXACT flag is specified, - * a non-zero error code is returned. - * - * 3. If the XTHAL_CAFLAG_NO_PARTIAL flag is specified - * (but not the EXACT flag), only regions fully - * covered by the specified range are updated with - * the requested attribute. - * - * CACHE HANDLING - * - * This function automatically writes back dirty data before remapping a - * virtual address region. - * - * This writeback is done safely, ie. by first switching to writethrough - * mode, and then invoking xthal_dcache_all_writeback(). Such a sequence is - * necessary to ensure there is no longer any dirty data in the memory region by the time - * this function returns, even in the presence of interrupts, speculation, etc. - * This automatic write-back can be disabled using the XTHAL_CAFLAG_NO_AUTO_WB flag. - * - * This function also invalidates the caches after remapping a region because the - * cache could contain (now invalid) data from the previous mapping. - * This automatic invalidate can be disabled using the XTHAL_CAFLAG_NO_AUTO_INV flag. - * - * Parameters: - * vaddr starting virtual address of region of memory - * - * paddr starting physical address for the mapping (this should be 512MB aligned to vaddr such that ((vaddr ^ paddr) & 0x10000000 == 0) - * - * size number of bytes in region of memory - * (see above, SPECIFYING THE MEMORY REGION) - * - * cattr cache attribute (encoded); - * typically taken from compile-time HAL constants - * XCHAL_CA_{BYPASS, WRITETHRU, WRITEBACK[_NOALLOC], ILLEGAL} - * (defined in ); - * in XEA1, this corresponds to the value of a nibble - * in the CACHEATTR register; - * in XEA2, this corresponds to the value of the - * cache attribute (CA) field of each TLB entry - * - * flags bitwise combination of flags XTHAL_CAFLAG_* - * - * XTHAL_CAFLAG_EXACT - If this flag is present, - * the mapping will only be done if the specified - * region exactly matches on or more 512MB pages otherwise - * XCHAL_INEXACT is returned (and no mapping is done). - * - * XTHAL_CAFLAG_NO_PARTIAL - If this flag is specified, then - * only pages that are completely covered by the specified region - * are affected. If this flag is specified, and no pages are completely - * covered by the region, then no pages are affected and XCHAL_NO_REGIONS_COVERED - * is returned. 
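Illustration only, not part of the deleted source: the partial-coverage rules described above are easiest to see in a small standalone sketch. The 512 MB region arithmetic and the EXACT / NO_PARTIAL adjustments below mirror what the implementation further down does with start_va_reg/end_va_reg; the function name, the printing and the example ranges are invented for illustration, and physical-address alignment and wrap-around checks are omitted.

#include <stdio.h>

#define REGION_SHIFT 29u                          /* 512 MB regions */
#define REGION_MASK  ((1u << REGION_SHIFT) - 1u)

/* Which 512 MB regions does [vaddr, vaddr+size) touch, and what would the
 * EXACT and NO_PARTIAL flags do with a partially covered region?
 */
static void show_coverage(unsigned vaddr, unsigned size)
{
    unsigned end = vaddr + size - 1u;             /* wrap-around not checked here */
    unsigned first = vaddr >> REGION_SHIFT;
    unsigned last = end >> REGION_SHIFT;
    int exact = !(vaddr & REGION_MASK) && !(size & REGION_MASK);

    printf("range 0x%08x..0x%08x touches regions %u..%u\n", vaddr, end, first, last);
    printf("  EXACT: would %s\n", exact ? "succeed" : "fail (range is not whole regions)");

    /* NO_PARTIAL: shrink the range to regions that are completely covered */
    if (vaddr & REGION_MASK)
        first++;
    if ((end & REGION_MASK) != REGION_MASK)
        last--;
    if (last == (unsigned)-1 || last < first)
        printf("  NO_PARTIAL: no region fully covered\n");
    else
        printf("  NO_PARTIAL: regions %u..%u updated\n", first, last);
}

int main(void)
{
    show_coverage(0x20000000u, 0x20000000u);      /* exactly one 512 MB region */
    show_coverage(0x20100000u, 0x00100000u);      /* partial coverage only     */
    return 0;
}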
- * - * - * - * Returns: - * XCHAL_SUCCESS - successful, or size is zero - * - * XCHAL_NO_REGIONS_COVERED - XTHAL_CAFLAG_NO_PARTIAL flag specified and address range - * is valid with a non-zero size, however no 512 MB region (or page) - * is completely covered by the range - * - * XCHAL_INEXACT XTHAL_CAFLAG_EXACT flag specified, and address range does - * not exactly specify a 512 MB region (or page) - * - * XCHAL_INVALID_ADDRESS invalid address range specified (wraps around the end of memory) - * - * XCHAL_ADDRESS_MISALIGNED virtual and physical addresses are not aligned (512MB) - * - * - * XCHAL_UNSUPPORTED_ON_THIS_ARCH function not supported in this processor configuration - */ -int xthal_set_region_translation(void* vaddr, void* paddr, unsigned size, - unsigned cattr, unsigned flags) { -#if XCHAL_HAVE_XEA2 & !XCHAL_HAVE_MPU -#if XCHAL_HAVE_XLT_CACHEATTR || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - const unsigned CA_MASK = 0xF; - const unsigned addr_mask = 0x1fffffff; - const unsigned addr_shift = 29; - unsigned vaddr_a = (unsigned) vaddr; - unsigned paddr_a = (unsigned) paddr; - unsigned end_vaddr; - unsigned end_paddr; - unsigned start_va_reg; - unsigned end_va_reg; - unsigned start_pa_reg; - unsigned icache_attr = 0; - int rv; - int i; - if (size == 0) - return XTHAL_SUCCESS; - if ((vaddr_a & addr_mask) ^ (paddr_a & addr_mask)) - return XTHAL_ADDRESS_MISALIGNED; - icache_attr = cattr & CA_MASK; -#if (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - // if using the mmu in spanning way mode then 'and in' the R, RX, RW, RWX bits - if ((cattr & 0x40000000) && (icache_attr < 12)) - icache_attr = icache_attr & ((cattr & 0xF0) >> 4); -#endif - end_vaddr = vaddr_a + size - 1; - end_paddr = paddr_a + size - 1; - - if ((end_vaddr < vaddr_a) || (end_paddr < paddr_a)) - return XTHAL_INVALID_ADDRESS; - start_va_reg = vaddr_a >> addr_shift; - end_va_reg = end_vaddr >> addr_shift; - start_pa_reg = paddr_a >> addr_shift; - if ((flags & XTHAL_CAFLAG_EXACT) - && ((size & addr_mask) || (vaddr_a & addr_mask) - || (paddr_a & addr_mask))) - return XTHAL_INEXACT; - if (flags & XTHAL_CAFLAG_NO_PARTIAL) { - if (vaddr_a & addr_mask) { - start_va_reg++; - start_pa_reg++; - } - if ((end_vaddr & addr_mask) != addr_mask) - end_va_reg--; - } - if (end_va_reg < start_va_reg) - return XTHAL_NO_REGIONS_COVERED; - /* - * Now we need to take care of any uncommitted cache writes in the affected regions - * 1) first determine if any regions are in write back mode - * 2) change those pages to write through - * 3) force the writeback of d-cache by calling xthal_dcach_all_writeback() - */ -#if ((XCHAL_DCACHE_SIZE >0) && XCHAL_DCACHE_IS_WRITEBACK) - if (!(flags & XTHAL_CAFLAG_NO_AUTO_WB)) { - unsigned old_cache_attr = xthal_get_cacheattr(); - unsigned cachewrtr = old_cache_attr; - unsigned need_safe_writeback = 0; - for (i = start_va_reg; i <= end_va_reg; i++) { - unsigned sh = i << 2; - unsigned old_attr = (old_cache_attr >> sh) & CA_MASK; - if (is_writeback(old_attr)) { - need_safe_writeback = 1; - cachewrtr = (cachewrtr & ~(CA_MASK << sh)) - | (CA_WRITETHRU << sh); - } - } - - if (need_safe_writeback) { - xthal_set_cacheattr(cachewrtr); /* set to writethru first, to safely writeback any dirty data */ - xthal_dcache_all_writeback(); /* much quicker than scanning entire 512MB region(s) */ - } - } -#endif - /* Now we set the affected region translations */ - for (i = start_va_reg; i <= end_va_reg; i++) { - if ((rv = xthal_set_region_translation_raw( - (void*) ((start_va_reg++) << addr_shift), - (void*) 
((start_pa_reg++) << addr_shift), icache_attr))) - return rv; - } - - /* - * Now we need to invalidate the cache in the affected regions. For now invalidate entire cache, - * but investigate if there are faster alternatives on some architectures. - */ - if (!(flags & XTHAL_CAFLAG_NO_AUTO_INV)) { -# if XCHAL_DCACHE_SIZE > 0 - xthal_dcache_all_writeback_inv(); /* some areas in memory (outside the intended region) may have uncommitted - data so we need the writeback_inv(). */ -#endif -#if XCHAL_ICACHE_SIZE >0 - xthal_icache_all_invalidate(); -#endif - } - return XTHAL_SUCCESS; -#else - return XTHAL_UNSUPPORTED; -#endif -#else - return XTHAL_UNSUPPORTED; -#endif -} - -/* xthal_invalidate_region() - * invalidates the tlb entry for the specified region. - * - * This function is only supported on processor configurations - * with a v3 MMU with a spanning way. - * - * Parameter - * vaddr - virtual address of region to invalidate (512MB aligned) - * - * returns: - * XCHAL_SUCCESS - Success - * XCHAL_UNSUPPORTED_ON_THIS_ARCH - Unsupported - * - */ -int xthal_invalidate_region(void* vaddr) { -#if XCHAL_HAVE_XEA2 & !XCHAL_HAVE_MPU -#if (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - unsigned addr = (unsigned) vaddr; - if (addr & 0x1fffffff) - return XTHAL_INVALID_ADDRESS; - addr += XCHAL_SPANNING_WAY; - invalidate_dtlb_entry(addr); - invalidate_itlb_entry(addr); - return XTHAL_SUCCESS; -#else - return XTHAL_UNSUPPORTED; -#endif -#else - return XTHAL_UNSUPPORTED; -#endif -} - diff --git a/src/arch/xtensa/hal/state.c b/src/arch/xtensa/hal/state.c deleted file mode 100644 index 316ddb4e8298..000000000000 --- a/src/arch/xtensa/hal/state.c +++ /dev/null @@ -1,321 +0,0 @@ -// -// processor_state.c - processor state management routines -// - -// Copyright (c) 2005-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
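A hedged usage sketch of the region-translation API removed above, not part of the deleted sources: it assumes the xthal_* prototypes and the XTHAL_CAFLAG_* / XCHAL_CA_* / XTHAL_SUCCESS constants are provided by <xtensa/hal.h>, and that the target has the region translation or v3 spanning-way MMU these functions require; the wrapper function name is invented for illustration.

#include <xtensa/hal.h>

static int remap_region_writeback(void *vaddr, void *paddr)
{
    /* Map one full 512 MB region, write-back cached. XTHAL_CAFLAG_EXACT makes
     * the call fail instead of silently touching partially covered regions.
     */
    int rc = xthal_set_region_translation(vaddr, paddr, 0x20000000u,
                                          XCHAL_CA_WRITEBACK,
                                          XTHAL_CAFLAG_EXACT);
    if (rc != XTHAL_SUCCESS)
        return rc;

    /* ... use the new mapping ... */

    /* Drop the mapping again; vaddr must be 512 MB aligned. */
    return xthal_invalidate_region(vaddr);
}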
- -#include -#include - - -//---------------------------------------------------------------------- - -#if defined(__SPLIT__extra_size) -// space for "extra" (user special registers and non-coprocessor TIE) state: -const unsigned int Xthal_extra_size = XCHAL_NCP_SA_SIZE; - -#elif defined(__SPLIT__extra_align) -const unsigned int Xthal_extra_align = XCHAL_NCP_SA_ALIGN; - -#elif defined(__SPLIT__cpregs_size) -// space for state of TIE coprocessors: -const unsigned int Xthal_cpregs_size[8] = - { - XCHAL_CP0_SA_SIZE, - XCHAL_CP1_SA_SIZE, - XCHAL_CP2_SA_SIZE, - XCHAL_CP3_SA_SIZE, - XCHAL_CP4_SA_SIZE, - XCHAL_CP5_SA_SIZE, - XCHAL_CP6_SA_SIZE, - XCHAL_CP7_SA_SIZE - }; - -#elif defined(__SPLIT__cpregs_align) -const unsigned int Xthal_cpregs_align[8] = - { - XCHAL_CP0_SA_ALIGN, - XCHAL_CP1_SA_ALIGN, - XCHAL_CP2_SA_ALIGN, - XCHAL_CP3_SA_ALIGN, - XCHAL_CP4_SA_ALIGN, - XCHAL_CP5_SA_ALIGN, - XCHAL_CP6_SA_ALIGN, - XCHAL_CP7_SA_ALIGN - }; - -#elif defined(__SPLIT__cp_names) -const char * const Xthal_cp_names[8] = - { - XCHAL_CP0_NAME, - XCHAL_CP1_NAME, - XCHAL_CP2_NAME, - XCHAL_CP3_NAME, - XCHAL_CP4_NAME, - XCHAL_CP5_NAME, - XCHAL_CP6_NAME, - XCHAL_CP7_NAME - }; - -#elif defined(__SPLIT__all_extra_size) -// total save area size (extra + all coprocessors + min 16-byte alignment everywhere) -const unsigned int Xthal_all_extra_size = XCHAL_TOTAL_SA_SIZE; - -#elif defined(__SPLIT__all_extra_align) -// maximum required alignment for the total save area (this might be useful): -const unsigned int Xthal_all_extra_align = XCHAL_TOTAL_SA_ALIGN; - -#elif defined(__SPLIT__num_coprocessors) -// number of coprocessors starting contiguously from zero -// (same as Xthal_cp_max, but included for Tornado2): -const unsigned int Xthal_num_coprocessors = XCHAL_CP_MAX; - -#elif defined(__SPLIT__cp_num) -// actual number of coprocessors: -const unsigned char Xthal_cp_num = XCHAL_CP_NUM; - -#elif defined(__SPLIT__cp_max) -// index of highest numbered coprocessor, plus one: -const unsigned char Xthal_cp_max = XCHAL_CP_MAX; - -// index of highest allowed coprocessor number, per cfg, plus one: -//const unsigned char Xthal_cp_maxcfg = XCHAL_CP_MAXCFG; - -#elif defined(__SPLIT__cp_mask) -// bitmask of which coprocessors are present: -const unsigned int Xthal_cp_mask = XCHAL_CP_MASK; - -#elif defined(__SPLIT__cp_id_mappings) -// Coprocessor ID from its name - -# ifdef XCHAL_CP0_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP0_IDENT) = 0; -# endif -# ifdef XCHAL_CP1_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP1_IDENT) = 1; -# endif -# ifdef XCHAL_CP2_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP2_IDENT) = 2; -# endif -# ifdef XCHAL_CP3_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP3_IDENT) = 3; -# endif -# ifdef XCHAL_CP4_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP4_IDENT) = 4; -# endif -# ifdef XCHAL_CP5_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP5_IDENT) = 5; -# endif -# ifdef XCHAL_CP6_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP6_IDENT) = 6; -# endif -# ifdef XCHAL_CP7_IDENT -const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP7_IDENT) = 7; -# endif - -#elif defined(__SPLIT__cp_mask_mappings) -// Coprocessor "mask" (1 << ID) from its name - -# ifdef XCHAL_CP0_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP0_IDENT) = (1 << 0); -# endif -# ifdef XCHAL_CP1_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP1_IDENT) = (1 << 1); -# endif -# ifdef XCHAL_CP2_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP2_IDENT) = (1 << 2); -# endif -# ifdef 
XCHAL_CP3_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP3_IDENT) = (1 << 3); -# endif -# ifdef XCHAL_CP4_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP4_IDENT) = (1 << 4); -# endif -# ifdef XCHAL_CP5_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP5_IDENT) = (1 << 5); -# endif -# ifdef XCHAL_CP6_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP6_IDENT) = (1 << 6); -# endif -# ifdef XCHAL_CP7_IDENT -const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP7_IDENT) = (1 << 7); -# endif - -//---------------------------------------------------------------------- - -#elif defined(__SPLIT__init_mem_extra) -// CMS: I have made the assumptions that 0's are safe initial -// values. That may be wrong at some point. -// -// initialize the extra processor -void -xthal_init_mem_extra(void *address) -/* not clear that it is safe to call memcpy and also not clear - that performance is important. */ -{ - unsigned int *ptr; - unsigned int *end; - - ptr = (unsigned int *)address; - end = (unsigned int *)((int)address + XCHAL_NCP_SA_SIZE); - while( ptr < end ) - { - *ptr++ = 0; - } -} - -#elif defined(__SPLIT__init_mem_cp) -// initialize the TIE coprocessor -void -xthal_init_mem_cp(void *address, int cp) -{ - unsigned int *ptr; - unsigned int *end; - - if( cp <= 7 ) - { - end = (unsigned int *)((int)address + Xthal_cpregs_size[cp]); - ptr = (unsigned int *)address; - while( ptr < end ) - { - *ptr++ = 0; - } - } -} - -#endif /*splitting*/ - - -/* Nothing implemented below this point. */ -/************************************************************************/ - -// save all extra+cp processor state (NOT IMPLEMENTED) -/*void xthal_save_all_extra(void *base) -{ - xthal_save_extra(base); - ... here we need to iterate over configured coprocessor register files ... -// xthal_save_cpregs(base+XCHAL_NCP_SA_SIZE, 0); -}*/ - -// restore all extra+cp processor state (NOT IMPLEMENTED) -/*void xthal_restore_all_extra(void *base) -{ - xthal_restore_extra(base); - ... here we need to iterate over configured coprocessor register files ... 
-// xthal_restore_cpregs(base+XCHAL_NCP_SA_SIZE, 0); -}*/ - - -// initialize the extra processor (NOT IMPLEMENTED) -/*void xthal_init_extra() -{ -}*/ - -// initialize the TIE coprocessor (NOT IMPLEMENTED) -/*void xthal_init_cp(int cp) -{ -}*/ - - -#if 0 - -/* read extra state register (NOT IMPLEMENTED) */ -int xthal_read_extra(void *base, unsigned reg, unsigned *value) -{ - if (reg&0x1000) { - switch(reg) { -#if XCHAL_HAVE_MAC16 - case 16: - *value = ((unsigned *)base)[0]; - return reg; - case 17: - *value = ((unsigned *)base)[1]; - return reg; - case 32: - *value = ((unsigned *)base)[2]; - return reg; - case 33: - *value = ((unsigned *)base)[3]; - return reg; - case 34: - *value = ((unsigned *)base)[4]; - return reg; - case 35: - *value = ((unsigned *)base)[5]; - return reg; -#endif /* XCHAL_HAVE_MAC16 */ - } - } - return -1; -} - -/* write extra state register (NOT IMPLEMENTED) */ -int xthal_write_extra(void *base, unsigned reg, unsigned value) -{ - if (reg&0x1000) { - switch(reg) { -#if XCHAL_HAVE_MAC16 - case 16: - ((unsigned *)base)[0] = value; - return reg; - case 17: - ((unsigned *)base)[1] = value; - return reg; - case 32: - ((unsigned *)base)[2] = value; - return reg; - case 33: - ((unsigned *)base)[3] = value; - return reg; - case 34: - ((unsigned *)base)[4] = value; - return reg; - case 35: - ((unsigned *)base)[5] = value; - return reg; -#endif /* XCHAL_HAVE_MAC16 */ - } - } - return -1; -} - -#endif /*0*/ - - -/* read TIE coprocessor register (NOT IMPLEMENTED) */ -/*int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value) -{ - return -1; -}*/ - -/* write TIE coproessor register (NOT IMPLEMENTED) */ -/*int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value) -{ - return -1; -}*/ - -/* return coprocessor number based on register (NOT IMPLEMENTED) */ -/*int xthal_which_cp(unsigned reg) -{ - return -1; -}*/ - diff --git a/src/arch/xtensa/hal/state_asm.S b/src/arch/xtensa/hal/state_asm.S deleted file mode 100644 index 108986228584..000000000000 --- a/src/arch/xtensa/hal/state_asm.S +++ /dev/null @@ -1,433 +0,0 @@ -/* - * state_asm.S - assembly language processor management routines - */ - -/* - * Copyright (c) 2005-2010 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - - .text - -//---------------------------------------------------------------------- -// save the extra processor state. 
-//---------------------------------------------------------------------- - -#if defined(__SPLIT__save_extra) ||\ - defined(__SPLIT__save_extra_nw) - -// void xthal_save_extra(void *base) - -DECLFUNC(xthal_save_extra) - abi_entry - xchal_extra_store_funcbody - abi_return - endfunc - - -//---------------------------------------------------------------------- -// restore the extra processor state. -//---------------------------------------------------------------------- - -#elif defined(__SPLIT__restore_extra) ||\ - defined(__SPLIT__restore_extra_nw) - -// void xthal_restore_extra(void *base) - -DECLFUNC(xthal_restore_extra) - abi_entry - xchal_extra_load_funcbody - abi_return - endfunc - -//---------------------------------------------------------------------- -// save the TIE COPROCESSORS state -//---------------------------------------------------------------------- - -#elif defined(__SPLIT__save_cpregs) ||\ - defined(__SPLIT__save_cpregs_nw) - -// void xthal_save_cpregs(void *base, int) -DECLFUNC(xthal_save_cpregs) - abi_entry - xchal_cpi_store_funcbody - abi_return - endfunc -#elif defined(__SPLIT__save_cp0) ||\ - defined(__SPLIT__save_cp0_nw) -// void xthal_save_cp0(void *base) -DECLFUNC(xthal_save_cp0) - abi_entry - xchal_cp0_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp1) ||\ - defined(__SPLIT__save_cp1_nw) -// void xthal_save_cp1(void *base) -DECLFUNC(xthal_save_cp1) - abi_entry - xchal_cp1_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp2) ||\ - defined(__SPLIT__save_cp2_nw) -// void xthal_save_cp2(void *base) -DECLFUNC(xthal_save_cp2) - abi_entry - xchal_cp2_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp3) ||\ - defined(__SPLIT__save_cp3_nw) -// void xthal_save_cp3(void *base) -DECLFUNC(xthal_save_cp3) - abi_entry - xchal_cp3_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp4) ||\ - defined(__SPLIT__save_cp4_nw) -// void xthal_save_cp4(void *base) -DECLFUNC(xthal_save_cp4) - abi_entry - xchal_cp4_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp5) ||\ - defined(__SPLIT__save_cp5_nw) -// void xthal_save_cp5(void *base) -DECLFUNC(xthal_save_cp5) - abi_entry - xchal_cp5_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp6) || \ - defined(__SPLIT__save_cp6_nw) -// void xthal_save_cp6(void *base) -DECLFUNC(xthal_save_cp6) - abi_entry - xchal_cp6_store_a2 - abi_return - endfunc -#elif defined(__SPLIT__save_cp7) ||\ - defined(__SPLIT__save_cp7_nw) -// void xthal_save_cp7(void *base) -DECLFUNC(xthal_save_cp7) - abi_entry - xchal_cp7_store_a2 - abi_return - endfunc - -//---------------------------------------------------------------------- -// restore the TIE coprocessor state -//---------------------------------------------------------------------- - -#elif defined(__SPLIT__restore_cpregs) ||\ - defined(__SPLIT__restore_cpregs_nw) - -// void xthal_restore_cpregs(void *base, int) - -DECLFUNC(xthal_restore_cpregs) - abi_entry - xchal_cpi_load_funcbody - abi_return - endfunc -#elif defined(__SPLIT__restore_cp0) ||\ - defined(__SPLIT__restore_cp0_nw) -// void xthal_restore_cp0(void *base) -DECLFUNC(xthal_restore_cp0) - abi_entry - xchal_cp0_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp1) ||\ - defined(__SPLIT__restore_cp1_nw) -// void xthal_restore_cp1(void *base) -DECLFUNC(xthal_restore_cp1) - abi_entry - xchal_cp1_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp2) ||\ - defined(__SPLIT__restore_cp2_nw) -// void xthal_restore_cp2(void *base) 
-DECLFUNC(xthal_restore_cp2) - abi_entry - xchal_cp2_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp3) || \ - defined(__SPLIT__restore_cp3_nw) -// void xthal_restore_cp3(void *base) -DECLFUNC(xthal_restore_cp3) - abi_entry - xchal_cp3_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp4) || \ - defined(__SPLIT__restore_cp4_nw) -// void xthal_restore_cp4(void *base) -DECLFUNC(xthal_restore_cp4) - abi_entry - xchal_cp4_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp5) || \ - defined(__SPLIT__restore_cp5_nw) -// void xthal_restore_cp5(void *base) -DECLFUNC(xthal_restore_cp5) - abi_entry - xchal_cp5_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp6) || \ - defined(__SPLIT__restore_cp6_nw) -// void xthal_restore_cp6(void *base) -DECLFUNC(xthal_restore_cp6) - abi_entry - xchal_cp6_load_a2 - abi_return - endfunc -#elif defined(__SPLIT__restore_cp7) || \ - defined(__SPLIT__restore_cp7_nw) -// void xthal_restore_cp7(void *base) -DECLFUNC(xthal_restore_cp7) - abi_entry - xchal_cp7_load_a2 - abi_return - endfunc - -#elif defined(__SPLIT__cpregs_save_fn) - .section .rodata, "a" -_SYM(Xthal_cpregs_save_fn) -# ifdef __XTENSA_CALL0_ABI__ -_SYM(Xthal_cpregs_save_nw_fn) -# endif - .long xthal_save_cp0 - .long xthal_save_cp1 - .long xthal_save_cp2 - .long xthal_save_cp3 - .long xthal_save_cp4 - .long xthal_save_cp5 - .long xthal_save_cp6 - .long xthal_save_cp7 - endfunc - .text - -#elif defined(__SPLIT__cpregs_save_nw_fn) -# ifndef __XTENSA_CALL0_ABI__ - .section .rodata, "a" -_SYM(Xthal_cpregs_save_nw_fn) - .long xthal_save_cp0_nw - .long xthal_save_cp1_nw - .long xthal_save_cp2_nw - .long xthal_save_cp3_nw - .long xthal_save_cp4_nw - .long xthal_save_cp5_nw - .long xthal_save_cp6_nw - .long xthal_save_cp7_nw - endfunc - .text -# endif - -#elif defined(__SPLIT__cpregs_restore_fn) - .section .rodata, "a" -_SYM(Xthal_cpregs_restore_fn) -# ifdef __XTENSA_CALL0_ABI__ -_SYM(Xthal_cpregs_restore_nw_fn) -# endif - .long xthal_restore_cp0 - .long xthal_restore_cp1 - .long xthal_restore_cp2 - .long xthal_restore_cp3 - .long xthal_restore_cp4 - .long xthal_restore_cp5 - .long xthal_restore_cp6 - .long xthal_restore_cp7 - endfunc - .text - -#elif defined(__SPLIT__cpregs_restore_nw_fn) -# ifndef __XTENSA_CALL0_ABI__ - .section .rodata, "a" -_SYM(Xthal_cpregs_restore_nw_fn) - .long xthal_restore_cp0_nw - .long xthal_restore_cp1_nw - .long xthal_restore_cp2_nw - .long xthal_restore_cp3_nw - .long xthal_restore_cp4_nw - .long xthal_restore_cp5_nw - .long xthal_restore_cp6_nw - .long xthal_restore_cp7_nw - endfunc - .text -# endif - - -//---------------------------------------------------------------------- -// coprocessor enable/disable -//---------------------------------------------------------------------- - -#elif defined(__SPLIT__validate_cp) ||\ - defined(__SPLIT__validate_cp_nw) - -// validate the register file. -// void xthal_validate_cp(int) - -DECLFUNC(xthal_validate_cp) - abi_entry -#if XCHAL_HAVE_CP - rsr.cpenable a3 - movi a4, 1 - ssl a2 - sll a4, a4 - or a3, a3, a4 - wsr.cpenable a3 -#endif - abi_return - endfunc - -#elif defined(__SPLIT__invalidate_cp) || \ - defined(__SPLIT__invalidate_cp_nw) - -// invalidate the register file. 
-// void xthal_invalidate_cp(int) - -DECLFUNC(xthal_invalidate_cp) - abi_entry -#if XCHAL_HAVE_CP - rsr.cpenable a3 - movi a4, 1 - ssl a2 - sll a4, a4 - and a4, a3, a4 - xor a3, a3, a4 - wsr.cpenable a3 -#endif - abi_return - endfunc - - -//---------------------------------------------------------------------- -// Access the CPENABLE register -//---------------------------------------------------------------------- - -#elif defined(__SPLIT__get_cpenable) || \ - defined(__SPLIT__get_cpenable_nw) - -// unsigned xthal_get_cpenable(void); - -DECLFUNC(xthal_get_cpenable) - abi_entry -#if XCHAL_HAVE_CP - rsr.cpenable a2 -#else - movi a2, 0 // if no CPENABLE (no coprocessors), none is ever enabled -#endif - abi_return - endfunc - -#elif defined(__SPLIT__set_cpenable) ||\ - defined(__SPLIT__set_cpenable_nw) - -// void xthal_set_cpenable(unsigned); -// -// Note: to help asm code performance (eg. OS task switch), -// this routine returns the previous value of CPENABLE in a3 -// (not a2, because that could require an extra mov instruction). -// This return value is not shown in the prototype, because -// C code won't see it. -// [Perhaps this should go in an RTOS-specific Core HAL or BSP. TBD.] - -DECLFUNC(xthal_set_cpenable) - abi_entry -#if XCHAL_HAVE_CP - //rsr.cpenable a3 // return previous CPENABLE - movi a3, 0 // for now, always return 0 (VxWorks currently done that way) - - wsr.cpenable a2 -#else - movi a3, 0 // if no CPENABLE (no coprocessors), none is ever enabled -#endif - abi_return - endfunc -#endif - - -/* Nothing implemented below this point. */ -/************************************************************************/ - -#if 0 - -//---------------------------------------------------------------------- -// initialize the processor state -//---------------------------------------------------------------------- - -// void xthal_init_extra_nw() - .global xthal_init_extra_nw - .align 4 -xthal_init_extra_nw: - //addi sp, sp, 0 - ... NOT IMPLEMENTED ... - ret - -//---------------------------------------------------------------------- -// initialize the TIE coprocessor -//---------------------------------------------------------------------- - -// void xthal_init_cp_nw(int) - .global xthal_init_cp_nw - .align 4 -xthal_init_cp_nw: - //addi sp, sp, 0 - ... NOT IMPLEMENTED ... - ret - -//---------------------------------------------------------------------- -// -//---------------------------------------------------------------------- - -// initialize the extra processor -// void xthal_init_mem_extra_nw() - .global xthal_init_mem_extra_nw - .align 4 -xthal_init_mem_extra_nw: - //addi sp, sp, 0 - ... NOT IMPLEMENTED ... - ret - -//---------------------------------------------------------------------- -// -//---------------------------------------------------------------------- - -// initialize the TIE coprocessor -// void xthal_init_mem_cp_nw(int) - .global xthal_init_mem_cp_nw - .align 4 -xthal_init_mem_cp_nw: - //addi sp, sp, 0 - ... NOT IMPLEMENTED ... - ret - -#endif /*0*/ - diff --git a/src/arch/xtensa/hal/syscache_asm.S b/src/arch/xtensa/hal/syscache_asm.S deleted file mode 100644 index 0716ddca17ff..000000000000 --- a/src/arch/xtensa/hal/syscache_asm.S +++ /dev/null @@ -1,141 +0,0 @@ -// -// syscache_asm.S - system-dependent assembly language cache management routines -// -// These functions are now obsolete. They cannot be properly implemented -// in the HAL, because the required settings of CACHEATTR are entirely -// system- or board-dependent. 
The HAL is not board specific; it is merely -// processor-configuration specific. These cache enable and disable -// functions do a "best-guess" of what values may be appropriate. -// They should be avoided. (Instead, use xthal_set_[id]cacheattr() -// and provide specific CACHEATTR values for the board or system. -// See the LSP ref manual for info on how to obtain such a value as -// computed by xt-genldscripts for a specific LSP, e.g. by using the -// address of the _memmap_cacheattr_reset symbol.) -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/syscache_asm.S#1 $ - -// Copyright (c) 2003-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#ifdef INCLUDE_DEPRECATED_HAL_CACHE_CODE - -#include -#include -#include - -/*** Modify this for your particular board or system: ***/ -#define CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_DEFAULT -#define CACHEATTR_BYPASS XSHAL_ISS_CACHEATTR_BYPASS - -//---------------------------------------------------------------------- -// Enable and disable the caches -//---------------------------------------------------------------------- - - .text - - .global xthal_icache_enable - .global xthal_dcache_enable - .global xthal_icache_enable_nw - .global xthal_dcache_enable_nw - - .global xthal_icache_disable - .global xthal_dcache_disable - .global xthal_icache_disable_nw - .global xthal_dcache_disable_nw - - /* - * Since we can't enable/disable the icache and dcache independently, - * and don't have a nice place to store a state which would enable - * us to only enable them both when both have been requested to be - * enabled, we simply enable both for any request to enable either, - * and disable both for any request to disable either cache. - */ - - .align 4 -xthal_icache_enable: - abi_entry - movi a3, xthal_set_icacheattr - movi a6, CACHEATTR_DEFAULT // get cache-enabled attributes - callx4 a3 // enable i-cache - mov a2, a6 // (in case future version has a return value) - abi_return - .size xthal_icache_enable, . - xthal_icache_enable - - .align 4 -xthal_dcache_enable: - abi_entry - movi a3, xthal_set_dcacheattr - movi a6, CACHEATTR_DEFAULT // get cache-enabled attributes - callx4 a3 // enable d-cache - mov a2, a6 // (in case future version has a return value) - abi_return - .size xthal_dcache_enable, . 
- xthal_dcache_enable - - .align 4 -xthal_icache_disable: - abi_entry - movi a3, xthal_set_icacheattr - movi a6, CACHEATTR_BYPASS // get cache-disabled attributes - callx4 a3 // disable i-cache - mov a2, a6 // (in case future version has a return value) - abi_return - .size xthal_icache_disable, . - xthal_icache_disable - - .align 4 -xthal_dcache_disable: - abi_entry - movi a3, xthal_set_dcacheattr - movi a6, CACHEATTR_BYPASS // get cache-disabled attributes - callx4 a3 // disable d-cache - mov a2, a6 // (in case future version has a return value) - abi_return - .size xthal_dcache_disable, . - xthal_dcache_disable - - .align 4 -xthal_icache_enable_nw: - movi a3, xthal_set_icacheattr_nw - movi a2, CACHEATTR_DEFAULT // get cache-enabled attributes - jx a3 // enable i-cache - .size xthal_icache_enable_nw, . - xthal_icache_enable_nw - - .align 4 -xthal_dcache_enable_nw: - movi a3, xthal_set_dcacheattr_nw - movi a2, CACHEATTR_DEFAULT // get cache-enabled attributes - jx a3 // enable d-cache - .size xthal_dcache_enable_nw, . - xthal_dcache_enable_nw - - .align 4 -xthal_icache_disable_nw: - movi a3, xthal_set_icacheattr_nw - movi a2, CACHEATTR_BYPASS // get cache-disabled attributes - jx a3 // disable i-cache - .size xthal_icache_disable_nw, . - xthal_icache_disable_nw - - .align 4 -xthal_dcache_disable_nw: - movi a3, xthal_set_dcacheattr_nw - movi a2, CACHEATTR_BYPASS // get cache-disabled attributes - jx a3 // disable d-cache - .size xthal_dcache_disable_nw, . - xthal_dcache_disable_nw - -#endif /* INCLUDE_DEPRECATED_HAL_CACHE_CODE */ - diff --git a/src/arch/xtensa/hal/windowspill_asm.S b/src/arch/xtensa/hal/windowspill_asm.S deleted file mode 100644 index 4568a41f5175..000000000000 --- a/src/arch/xtensa/hal/windowspill_asm.S +++ /dev/null @@ -1,386 +0,0 @@ -// -// windowspill.S -- register window spill routine -// -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/hal/windowspill_asm.S#1 $ - -// Copyright (c) 1999-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - -// xthal_window_spill_nw -// -// Spill live register windows to the stack. -// -// Required entry conditions: -// PS.WOE = 0 -// PS.INTLEVEL >= XCHAL_EXCM_LEVEL -// a1 = valid stack pointer (note: some regs may be spilled at a1-16) -// a0 = return PC (usually set by call0 or callx0 when calling this function) -// a2,a3 undefined -// a4 thru a15 valid, if they are part of window(s) to be spilled -// (Current window a0..a15 saved if necessary.) 
-// WINDOWSTART[WINDOWBASE] = 1 -// -// Exit conditions: -// PS.WOE, PS.INTLEVEL = same as on entry -// WINDOWBASE = same as on entry -// WINDOWSTART updated to reflect spilled windows -// (equals 1< successful -// (WINDOWSTART = 1< invalid WINDOWSTART (WINDOWBASE bit not set) -// (WINDOWSTART unchanged) -// 2 --> invalid window size (not 4, 8 or 12 regs) -// (WINDOWSTART bits of successfully spilled -// windows are cleared, others left intact) -// a3 clobbered -// a4,a5,a8,a9,a12,a13 = same as on entry -// a6,a7,a10,a11,a14,a15 clobbered if they were part of window(s) -// to be spilled, otherwise they are the same as on entry -// loop registers (LCOUNT,LBEG,LEND) are NOT affected (they were in earlier versions) -// SAR clobbered -// -// All non-spilled register windows will be spilled. -// Beware that this may include a4..a15 of the current window, -// so generally these should not have been clobbered by the -// caller if it is at all possible that these registers -// are part of an unspilled window (it often is possible) -// (otherwise the spilled stack would be invalid). -// -// THIS MEANS: the caller is responsible for saving a0-a15 but -// the caller must leave a4-a15 intact when control is transferred -// here. -// -// It may be reentrant (but stack pointer is invalid during -// execution due to window rotations, so can't take interrupts -// and exceptions in the usual manner, so ... what does -// reentrancy really mean here?). - - - // The xthal_spill_registers_into_stack_nw entry point - // is kept here only for backwards compatibility. - // It will be removed in the very near future. - .global xthal_spill_registers_into_stack_nw - - .text - .align 4 - .global xthal_window_spill_nw -xthal_window_spill_nw: -xthal_spill_registers_into_stack_nw: // BACKWARD COMPATIBILITY ONLY - see above - -#if ! XCHAL_HAVE_WINDOWED - // Nothing to do -- window option was not selected. - movi a2, 0 // always report success - ret -#else /* XCHAL_HAVE_WINDOWED */ -#define WSBITS (XCHAL_NUM_AREGS / 4) /* width of WINDOWSTART register in bits */ -#define WBBITS (XCHAL_NUM_AREGS_LOG2 - 2) /* width of WINDOWBASE register in bits */ - /* - * Rearrange (rotate) window start bits relative to the current - * window (WINDOWBASE). WINDOWSTART currently looks like this: - * - * a15-a0 - * NAREG-1 | | 0 - * | vvvv | - * xxxxxxxxxx1yyyyy - * ^ - * | - * WINDOWBASE - * - * The start bit pointed to by WINDOWBASE must be set - * (we return an error if it isn't), as it corresponds - * to the start of the current window (shown as a0-a15). - * - * We want the window start bits rotated to look like this: - * 1yyyyyxxxxxxxxxx - * - * Note that there is one start bit for every four registers; - * and the total number of registers (NAREG) can be 32 or 64; - * so the number of start bits in WINDOWSTART is NAREG/4, - * and the size of WINDOWSTART can be 8 or 16. - */ - - rsr.windowbase a2 - addi a2, a2, 1 - ssr a2 // sar = WINDOWBASE + 1 - rsr.windowstart a3 - srl a2, a3 // a2 is 0... | 000000xxxxxxxxxx = WINDOWSTART >> sar - sll a3, a3 // a3 is 1yyyyy0000000000 | 0... = WINDOWSTART << (32 - sar) - bgez a3, .Linvalid_ws // verify that msbit is indeed set - - srli a3, a3, 32-WSBITS // a3 is 0... | 1yyyyy0000000000 = a3 >> (32-NAREG/4) - or a2, a2, a3 // a2 is 0... | 1yyyyyxxxxxxxxxx - - /* - * FIND THE FIRST ONE - * - * Now we have (in a2) the window start bits rotated in order - * from oldest (closest to lsbit) to current (msbit set). 
- * Each start bit (that is set), other than the current one, - * corresponds to a window frame to spill. - * - * Now find the first start bit, ie. the first frame to spill, - * by looking for the first bit set in a2 (from lsbit side). - */ - -#if XCHAL_HAVE_NSA - neg a3, a2 // keep only the least-significant bit set of a2 ... - and a3, a3, a2 // ... in a3 - nsau a3, a3 // get index of that bit, numbered from msbit (32 if absent) - ssl a3 // set sar = 32 - a3 = bit index numbered from lsbit + 1 -#else /* XCHAL_HAVE_NSA */ - wsr.windowstart a2 // temporarily save rotated start bits - // (we can use WINDOWSTART because WOE=0) - - // NOTE: this could be optimized a bit, by explicit coding rather than the macro. - find_ls_one a3, a2 // set a3 to index of lsmost bit set in a2 (a2 clobbered) - - addi a2, a3, 1 // index+1 - ssr a2 // set sar = index + 1 - rsr.windowstart a2 // restore a2 (rotated start bits) -#endif /* XCHAL_HAVE_NSA */ - srl a2, a2 // right-justify the rotated start bits (dropping lsbit set) - wsr.windowstart a2 // save rotated + justified window start bits, - // because a2 will disappear when modifying WINDOWBASE - // again, we can use WINDOWSTART because WOE=0 - - /* - * Rotate WindowBase so that a0 of the next window to spill is in a4 - * (ie. leaving us with a2 and a3 to play with, because a0 and a1 - * may be those of the original window which we must preserve). - */ - rsr.windowbase a2 -#if XCHAL_HAVE_NSA - addi a2, a2, 31 - sub a3, a2, a3 // a3 = WINDOWBASE + index = WINDOWBASE + (31 - msbit_index) -#else /* XCHAL_HAVE_NSA */ - add a3, a2, a3 // a3 = WINDOWBASE + index -#endif /* XCHAL_HAVE_NSA */ - wsr.windowbase a3 // effectively do: rotw index - rsync // wait for write to WINDOWBASE to complete - // Now our registers have changed! - - rsr.windowstart a2 // restore a2 (rotated + justified window start bits) - - /* - * We are now ready to start the window spill loop. - * Relative to the above, a2 and WINDOWBASE are now as follows: - * - * 1yyyyyxxxxxxxxxx = rotated start bits as shown above - * 1yyyyyxxxx100000 = actual rotated start bits (example) - * 0000001yyyyyxxxx ^ = a2 = rotated + justified start bits - * ^ xxx1^ = window being spilled - * ^ ^ - * | | - * original current - * WINDOWBASE WINDOWBASE - * - * The first window to spill (save) starts at what is now a4. - * The spill loop maintains the adjusted start bits in a2, - * shifting them right as each window is spilled. - */ - -.Lspill_loop: - // Top of save loop. - // Find the size of this call and branch to the appropriate save routine. - - beqz a2, .Ldone // if no start bit remaining, we're done - bbsi.l a2, 0, .Lspill4 // if next start bit is set, it's a call4 - bbsi.l a2, 1, .Lspill8 // if 2nd next bit set, it's a call8 - bbsi.l a2, 2, .Lspill12 // if 3rd next bit set, it's a call12 - j .Linvalid_window // else it's an invalid window! 
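A plain C model of the dispatch just above, for illustration only and not part of the deleted source: 'start' stands for the rotated-and-justified WINDOWSTART bits held in a2, and the bit tests and shift amounts mirror the beqz/bbsi.l/srli sequence of the assembly (the register moves and rotw window rotations are ignored).

#include <stdio.h>

static void model_spill_loop(unsigned start)
{
    while (start) {
        if (start & 0x1) {            /* next start bit adjacent: call4 frame  */
            puts("spill a call4 frame (4 regs)");
            start >>= 1;
        } else if (start & 0x2) {     /* one quad gap: call8 frame             */
            puts("spill a call8 frame (8 regs)");
            start >>= 2;
        } else if (start & 0x4) {     /* two quad gap: call12 frame            */
            puts("spill a call12 frame (12 regs)");
            start >>= 3;
        } else {
            puts("invalid window size");
            return;
        }
    }
    puts("done");
}

int main(void)
{
    model_spill_loop(0x16);           /* example: call8, call4, call8 pending */
    return 0;
}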
- - - - // SAVE A CALL4 -.Lspill4: - addi a3, a9, -16 // a3 gets call[i+1]'s sp - 16 - s32i a4, a3, 0 // store call[i]'s a0 - s32i a5, a3, 4 // store call[i]'s a1 - s32i a6, a3, 8 // store call[i]'s a2 - s32i a7, a3, 12 // store call[i]'s a3 - - srli a6, a2, 1 // move and shift the start bits - rotw 1 // rotate the window - - j .Lspill_loop - - // SAVE A CALL8 -.Lspill8: - addi a3, a13, -16 // a0 gets call[i+1]'s sp - 16 - s32i a4, a3, 0 // store call[i]'s a0 - s32i a5, a3, 4 // store call[i]'s a1 - s32i a6, a3, 8 // store call[i]'s a2 - s32i a7, a3, 12 // store call[i]'s a3 - - addi a3, a5, -12 // call[i-1]'s sp address - l32i a3, a3, 0 // a3 is call[i-1]'s sp - // (load slot) - addi a3, a3, -32 // a3 points to our spill area - - s32i a8, a3, 0 // store call[i]'s a4 - s32i a9, a3, 4 // store call[i]'s a5 - s32i a10, a3, 8 // store call[i]'s a6 - s32i a11, a3, 12 // store call[i]'s a7 - - srli a10, a2, 2 // move and shift the start bits - rotw 2 // rotate the window - - j .Lspill_loop - - // SAVE A CALL12 -.Lspill12: - rotw 1 // rotate to see call[i+1]'s sp - - addi a13, a13, -16 // set to the reg save area - s32i a0, a13, 0 // store call[i]'s a0 - s32i a1, a13, 4 // store call[i]'s a1 - s32i a2, a13, 8 // store call[i]'s a2 - s32i a3, a13, 12 // store call[i]'s a3 - - addi a3, a1, -12 // call[i-1]'s sp address - l32i a3, a3, 0 // a3 has call[i-1]'s sp - addi a13, a13, 16 // restore call[i+1]'s sp (here to fill load slot) - addi a3, a3, -48 // a3 points to our save area - - s32i a4, a3, 0 // store call[i]'s a4 - s32i a5, a3, 4 // store call[i]'s a5 - s32i a6, a3, 8 // store call[i]'s a6 - s32i a7, a3, 12 // store call[i]'s a7 - s32i a8, a3, 16 // store call[i]'s a4 - s32i a9, a3, 20 // store call[i]'s a5 - s32i a10, a3, 24 // store call[i]'s a6 - s32i a11, a3, 28 // store call[i]'s a7 - - rotw -1 // rotate to see start bits (a2) - srli a14, a2, 3 // move and shift the start bits - rotw 3 // rotate to next window - - j .Lspill_loop - - - -.Ldone: - rotw 1 // back to the original window - rsr.windowbase a2 // get (original) window base - ssl a2 // setup for shift left by WINDOWBASE - movi a2, 1 - sll a2, a2 // compute new WINDOWSTART = 1< - */ - -#ifdef __SOF_ATOMIC_H__ - -#ifndef __ARCH_ATOMIC_H__ -#define __ARCH_ATOMIC_H__ - -#include -#if XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ -#include -#endif -#include - -typedef struct { - volatile int32_t value; -} atomic_t; - -static inline int32_t arch_atomic_read(const atomic_t *a) -{ - return (*(volatile int32_t *)&a->value); -} - -static inline void arch_atomic_set(atomic_t *a, int32_t value) -{ - a->value = value; -} - -static inline void arch_atomic_init(atomic_t *a, int32_t value) -{ - arch_atomic_set(a, value); -} - -#if XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ - -/* Use exclusive instructions */ -static inline int32_t arch_atomic_add(atomic_t *a, int32_t value) -{ - /*reference xtos : xipc_misc.h*/ - int32_t result = 0; - int32_t current; - - while (!result) { - current = XT_L32EX((int32_t *)a); - result = current + value; - XT_S32EX(result, (int32_t *)a); - XT_GETEX(result); - } - return current; -} - -static inline int32_t arch_atomic_sub(atomic_t *a, int32_t value) -{ - /*reference xtos : xipc_misc.h*/ - int32_t current; - int32_t result = 0; - - while (!result) { - current = XT_L32EX((int *)a); - result = current - value; - XT_S32EX(result, (int *)a); - XT_GETEX(result); - } - return current; -} - -#elif XCHAL_HAVE_S32C1I - -/* Use S32C1I instructions */ -static inline int32_t 
arch_atomic_add(atomic_t *a, int32_t value) -{ - int32_t result, current; - - __asm__ __volatile__( - "1: l32i %1, %2, 0\n" - " wsr %1, scompare1\n" - " add %0, %1, %3\n" - " s32c1i %0, %2, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (current) - : "a" (&a->value), "a" (value) - : "memory"); - - return current; -} - -static inline int32_t arch_atomic_sub(atomic_t *a, int32_t value) -{ - int32_t result, current; - - __asm__ __volatile__( - "1: l32i %1, %2, 0\n" - " wsr %1, scompare1\n" - " sub %0, %1, %3\n" - " s32c1i %0, %2, 0\n" - " bne %0, %1, 1b\n" - : "=&a" (result), "=&a" (current) - : "a" (&a->value), "a" (value) - : "memory"); - - return current; -} - -#else - -#if CONFIG_CORE_COUNT > 1 - -#error No atomic ISA for SMP configuration - -#endif - -/* - * The ISA has no atomic operations so use integer arithmetic on uniprocessor systems. - * This helps support GCC and qemu emulation of certain targets. - */ - -/* integer arithmetic methods */ -static inline int32_t arch_atomic_add(atomic_t *a, int32_t value) -{ - int32_t result, current; - - current = arch_atomic_read(a); - result = current + value; - arch_atomic_set(a, result); - return current; -} - -static inline int32_t arch_atomic_sub(atomic_t *a, int32_t value) -{ - int32_t result, current; - - current = arch_atomic_read(a); - result = current - value; - arch_atomic_set(a, result); - return current; -} - -#endif /* XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ */ - -#endif /* __ARCH_ATOMIC_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/atomic.h" - -#endif /* __SOF_ATOMIC_H__ */ diff --git a/src/arch/xtensa/include/arch/compiler_info.h b/src/arch/xtensa/include/arch/compiler_info.h deleted file mode 100644 index 7a2d24ebe6db..000000000000 --- a/src/arch/xtensa/include/arch/compiler_info.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Karol Trzcinski - */ - -/** - * \file include/sof/compiler_info.h - * \brief Compiler version and name descriptor - * \author Karol Trzcinski - */ - -#ifndef __ARCH_COMPILER_INFO_H__ -#define __ARCH_COMPILER_INFO_H__ - -#include -#include - -/* read used compilator name and version */ -/* CC_NAME must consist of 3 characters with null termination */ -/* See declaration of sof_ipc_cc_version. */ -#ifdef __XCC__ -#define CC_MAJOR (XTHAL_RELEASE_MAJOR / 1000) -#define CC_MINOR ((XTHAL_RELEASE_MAJOR % 1000) / 10) -#define CC_MICRO XTHAL_RELEASE_MINOR -#define CC_NAME "XCC" -#else -#define CC_MAJOR __GNUC__ -#define CC_MINOR __GNUC_MINOR__ -#define CC_MICRO __GNUC_PATCHLEVEL__ -#define CC_NAME "GCC" - -#if CC_MAJOR >= 10 -#define CC_USE_LIBC -#endif - -#endif - -#define CC_DESC " " XCC_TOOLS_VERSION - -#endif /* __ARCH_COMPILER_INFO_H__ */ diff --git a/src/arch/xtensa/include/arch/debug/backtrace.h b/src/arch/xtensa/include/arch/debug/backtrace.h deleted file mode 100644 index 38ecc66d7786..000000000000 --- a/src/arch/xtensa/include/arch/debug/backtrace.h +++ /dev/null @@ -1,47 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2020 Intel Corporation. All rights reserved. 
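A usage sketch for the arch_atomic_* helpers above, not part of the deleted sources: note that all three variants of arch_atomic_add()/arch_atomic_sub() return the value the counter held *before* the update. The reference-count example below and its function names are invented for illustration and assume the atomic_t type and helpers from the deleted header.

/* Hypothetical reference counter built on the helpers above. */
static atomic_t refcount;

static void obj_init(void)
{
    arch_atomic_init(&refcount, 1);   /* creator holds the first reference */
}

static void obj_get(void)
{
    arch_atomic_add(&refcount, 1);
}

static int obj_put(void)
{
    /* arch_atomic_sub() returns the pre-decrement value, so the caller that
     * observed 1 released the last reference and may free the object.
     */
    return arch_atomic_sub(&refcount, 1) == 1;
}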
- */ - -#ifdef __SOF_DEBUG_BACKTRACE_H__ - -#ifndef __ARCH_DEBUG_BACKTRACE_H__ -#define __ARCH_DEBUG_BACKTRACE_H__ - -#include -#include -#include - -static inline void *arch_get_stack_ptr(void) -{ - void *ptr; - - /* stack pointer is in a1 */ - __asm__ __volatile__ ("mov %0, a1" - : "=a" (ptr) - : - : "memory"); - return ptr; -} - -static inline void *arch_get_stack_entry(void) -{ - volatile xtos_task_context *task_ctx = task_context_get(); - - return task_ctx->stack_base; -} - -static inline uint32_t arch_get_stack_size(void) -{ - volatile xtos_task_context *task_ctx = task_context_get(); - - return task_ctx->stack_size; -} - -#endif /* __ARCH_DEBUG_BACKTRACE_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/debug/backtrace.h" - -#endif /* __SOF_DEBUG_BACKTRACE_H__ */ diff --git a/src/arch/xtensa/include/arch/debug/gdb/init.h b/src/arch/xtensa/include/arch/debug/gdb/init.h deleted file mode 100644 index 076d55f25bf5..000000000000 --- a/src/arch/xtensa/include/arch/debug/gdb/init.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Marcin Rajwa - */ - -/* - * Header file for init.S - */ - -#ifdef __SOF_DEBUG_GDB_GDB_H__ - -#ifndef __ARCH_DEBUG_GDB_INIT_H__ -#define __ARCH_DEBUG_GDB_INIT_H__ - -extern void gdb_init_debug_exception(void); - -#endif /* __ARCH_DEBUG_GDB_INIT_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/debug/gdb/gdb.h" - -#endif /* __SOF_DEBUG_GDB_GDB_H__ */ diff --git a/src/arch/xtensa/include/arch/debug/gdb/utilities.h b/src/arch/xtensa/include/arch/debug/gdb/utilities.h deleted file mode 100644 index c786d774c6fe..000000000000 --- a/src/arch/xtensa/include/arch/debug/gdb/utilities.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Marcin Rajwa - */ - -/* - * Header file for Xtensa-GDB utilities. - */ - -#ifndef __ARCH_DEBUG_GDB_UTILITIES_H__ -#define __ARCH_DEBUG_GDB_UTILITIES_H__ - -/* Implicit inclusion from sof/debug/gdb/gdb.h */ -#include - -void arch_gdb_read_sr(int sr); -void arch_gdb_write_sr(int sr, int *sregs); -unsigned char arch_gdb_load_from_memory(void *mem); -void arch_gdb_memory_load_and_store(void *mem, unsigned char ch); -void arch_gdb_single_step(int *sregs); - -#endif /* __ARCH_DEBUG_GDB_UTILITIES_H__ */ diff --git a/src/arch/xtensa/include/arch/debug/gdb/xtensa-defs.h b/src/arch/xtensa/include/arch/debug/gdb/xtensa-defs.h deleted file mode 100644 index 8ae65da38472..000000000000 --- a/src/arch/xtensa/include/arch/debug/gdb/xtensa-defs.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Marcin Rajwa - */ - -/* - * Header file for xtensa specific defs for GDB. 
- */ - -#ifndef __ARCH_DEBUG_GDB_XTENSA_DEFS_H__ -#define __ARCH_DEBUG_GDB_XTENSA_DEFS_H__ - -#include -#include - -#define _AREG0 256 - -#define STACK_SIZE 1024 -#define DEBUG_PC (EPC + XCHAL_DEBUGLEVEL) -#define DEBUG_EXCSAVE (EXCSAVE + XCHAL_DEBUGLEVEL) -#define DEBUG_PS (EPS + XCHAL_DEBUGLEVEL) -#define DEBUG_WINDOWBASE WINDOWBASE -#define DEBUG_NUM_IBREAK XCHAL_NUM_IBREAK -#define DEBUG_IBREAKENABLE IBREAKENABLE -#define DEBUG_IBREAKA IBREAKA -#define DEBUG_INTENABLE INTENABLE -#define DEBUG_NUM_AREGS XCHAL_NUM_AREGS - -#endif /* __ARCH_DEBUG_GDB_XTENSA_DEFS_H__ */ diff --git a/src/arch/xtensa/include/arch/debug/offset-defs.h b/src/arch/xtensa/include/arch/debug/offset-defs.h deleted file mode 100644 index 682e90c5f97b..000000000000 --- a/src/arch/xtensa/include/arch/debug/offset-defs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - */ - -#ifndef __ARCH_DEBUG_OFFSET_DEFS_H__ -#define __ARCH_DEBUG_OFFSET_DEFS_H__ - -#include - -#define REG_OFFSET_EXCCAUSE 0x0 -#define REG_OFFSET_EXCVADDR 0x4 -#define REG_OFFSET_PS 0x8 -#define REG_OFFSET_EPC1 0xc -#define REG_OFFSET_EPC2 0x10 -#define REG_OFFSET_EPC3 0x14 -#define REG_OFFSET_EPC4 0x18 -#define REG_OFFSET_EPC5 0x1c -#define REG_OFFSET_EPC6 0x20 -#define REG_OFFSET_EPC7 0x24 -#define REG_OFFSET_EPS2 0x28 -#define REG_OFFSET_EPS3 0x2c -#define REG_OFFSET_EPS4 0x30 -#define REG_OFFSET_EPS5 0x34 -#define REG_OFFSET_EPS6 0x38 -#define REG_OFFSET_EPS7 0x3c -#define REG_OFFSET_DEPC 0x40 -#define REG_OFFSET_INTENABLE 0x44 -#define REG_OFFSET_INTERRUPT 0x48 -#define REG_OFFSET_SAR 0x4c -#define REG_OFFSET_DEBUGCAUSE 0x50 -#define REG_OFFSET_WINDOWBASE 0x54 -#define REG_OFFSET_WINDOWSTART 0x58 -#define REG_OFFSET_EXCSAVE1 0x5c -#define REG_OFFSET_AR_BEGIN 0x60 -#define REG_OFFSET_AR_END (REG_OFFSET_AR_BEGIN + 4 * XCHAL_NUM_AREGS) - -#endif /* __ARCH_DEBUG_OFFSET_DEFS_H__ */ diff --git a/src/arch/xtensa/include/arch/debug/panic.h b/src/arch/xtensa/include/arch/debug/panic.h deleted file mode 100644 index 4d940981d2ba..000000000000 --- a/src/arch/xtensa/include/arch/debug/panic.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Tomasz Lauda - */ - -#ifdef __XTOS_RTOS_PANIC_H__ - -#ifndef __ARCH_DEBUG_PANIC_H__ -#define __ARCH_DEBUG_PANIC_H__ - -#include -#include -#include -#include -#include - -/* xtensa core specific oops size */ -#define ARCH_OOPS_SIZE (sizeof(struct sof_ipc_dsp_oops_xtensa) \ - + (XCHAL_NUM_AREGS * sizeof(uint32_t))) - -void arch_dump_regs_a(void *dump_buf); - -static inline void fill_core_dump(struct sof_ipc_dsp_oops_xtensa *oops, - uintptr_t stack_ptr, uintptr_t *epc1) -{ - oops->arch_hdr.arch = ARCHITECTURE_ID; - oops->arch_hdr.totalsize = ARCH_OOPS_SIZE; -#if XCHAL_HW_CONFIGID_RELIABLE - oops->plat_hdr.configidhi = XCHAL_HW_CONFIGID0; - oops->plat_hdr.configidlo = XCHAL_HW_CONFIGID1; -#else - oops->plat_hdr.configidhi = 0; - oops->plat_hdr.configidlo = 0; -#endif - oops->plat_hdr.numaregs = XCHAL_NUM_AREGS; - oops->plat_hdr.stackoffset = oops->arch_hdr.totalsize - + sizeof(struct sof_ipc_panic_info); - oops->plat_hdr.stackptr = stack_ptr; - - if (epc1) - oops->epc1 = *epc1; - - /* With crosstool-ng gcc on some platforms this corrupts most of - * the other panic information, including the precious line - * number. 
See https://github.com/thesofproject/sof/issues/1346 - * Commenting this out loses the registers but avoids the - * corruption of the rest. - */ - arch_dump_regs_a((void *)&oops->exccause); -} - -static inline void arch_dump_regs(void *dump_buf, uintptr_t stack_ptr, - uintptr_t *epc1) -{ - fill_core_dump(dump_buf, stack_ptr, epc1); - - dcache_writeback_region((__sparse_force void __sparse_cache *)dump_buf, ARCH_OOPS_SIZE); -} - -#endif /* __ARCH_DEBUG_PANIC_H__ */ - -#else - -#error "This file shouldn't be included from outside of XTOS's rtos/panic.h" - -#endif /* __XTOS_RTOS_PANIC_H__ */ diff --git a/src/arch/xtensa/include/arch/drivers/idc.h b/src/arch/xtensa/include/arch/drivers/idc.h deleted file mode 100644 index 1a34697bdbba..000000000000 --- a/src/arch/xtensa/include/arch/drivers/idc.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2020 Intel Corporation. All rights reserved. - * - * Author: Tomasz Lauda - */ - -#ifdef __XTOS_RTOS_IDC_H__ - -#ifndef __ARCH_DRIVERS_IDC_H__ -#define __ARCH_DRIVERS_IDC_H__ - -#include -#include - -struct idc; - -/** - * \brief Returns IDC data. - * \return Pointer to pointer of IDC data. - */ -static inline struct idc **idc_get(void) -{ - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - return &ctx->idc; -} - -#endif /* __ARCH_DRIVERS_IDC_H__ */ - -#else - -#error "This file shouldn't be included from outside of XTOS's rtos/idc.h" - -#endif /* __XTOS_RTOS_IDC_H__ */ diff --git a/src/arch/xtensa/include/arch/drivers/interrupt.h b/src/arch/xtensa/include/arch/drivers/interrupt.h deleted file mode 100644 index c1a9306c59a5..000000000000 --- a/src/arch/xtensa/include/arch/drivers/interrupt.h +++ /dev/null @@ -1,97 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. 
- * - * Author: Liam Girdwood - */ - -#ifdef __SOF_DRIVERS_INTERRUPT_H__ - -#ifndef __ARCH_DRIVERS_INTERRUPT_H__ -#define __ARCH_DRIVERS_INTERRUPT_H__ - -#include -#include -#include -#include - - -static inline int arch_interrupt_register(int irq, - void (*handler)(void *arg), void *arg) -{ - xthal_set_intclear(0x1 << irq); - _xtos_set_interrupt_handler_arg(irq, handler, arg); - return 0; -} - -static inline void arch_interrupt_unregister(int irq) -{ - _xtos_set_interrupt_handler_arg(irq, NULL, NULL); -} - -/* returns previous mask */ -#define arch_interrupt_enable_mask(mask) \ - _xtos_ints_on(mask) - -/* returns previous mask */ -#define arch_interrupt_disable_mask(mask) \ - _xtos_ints_off(mask) - -static inline uint32_t arch_interrupt_get_level(void) -{ - uint32_t level; - - __asm__ __volatile__( - " rsr.ps %0\n" - " extui %0, %0, 0, 4\n" - : "=&a" (level) :: "memory"); - - return level; -} - -static inline void arch_interrupt_set(int irq) -{ - xthal_set_intset(0x1 << irq); -} - -static inline void arch_interrupt_clear(int irq) -{ - xthal_set_intclear(0x1 << irq); -} - -static inline uint32_t arch_interrupt_get_enabled(void) -{ - return xthal_get_intenable(); -} - -static inline uint32_t arch_interrupt_get_status(void) -{ - return xthal_get_interrupt(); -} - -static inline uint32_t arch_interrupt_global_disable(void) -{ - uint32_t flags; - - __asm__ __volatile__("rsil %0, 5" - : "=a" (flags) :: "memory"); - return flags; -} - -static inline void arch_interrupt_global_enable(uint32_t flags) -{ - __asm__ __volatile__("wsr %0, ps; rsync" - :: "a" (flags) : "memory"); -} - -#if CONFIG_WAKEUP_HOOK -void arch_interrupt_on_wakeup(void); -#endif - -#endif /* __ARCH_DRIVERS_INTERRUPT_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/drivers/interrupt.h" - -#endif /* __SOF_INTERRUPT_H__ */ diff --git a/src/arch/xtensa/include/arch/drivers/timer.h b/src/arch/xtensa/include/arch/drivers/timer.h deleted file mode 100644 index c1bc02df76b5..000000000000 --- a/src/arch/xtensa/include/arch/drivers/timer.h +++ /dev/null @@ -1,82 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. 
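The rsil/wsr-ps pair in the interrupt header above is the usual save-and-restore idiom for short critical sections. A minimal usage sketch, assuming only arch_interrupt_global_disable()/arch_interrupt_global_enable() as declared above; the counter and wrapper function are illustrative and not part of the deleted file:

	static int shared_counter;

	/* Illustrative only: protect a short read-modify-write from local interrupts
	 * by saving PS, raising INTLEVEL, and restoring the saved value afterwards.
	 */
	static void shared_counter_inc(void)
	{
		uint32_t flags = arch_interrupt_global_disable();	/* rsil: raise level, return old PS */

		shared_counter++;					/* must not be interrupted */

		arch_interrupt_global_enable(flags);			/* wsr ps + rsync: restore old level */
	}
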
- * - * Author: Liam Girdwood - */ - -#ifdef __SOF_DRIVERS_TIMER_H__ - -#ifndef __ARCH_DRIVERS_TIMER_H__ -#define __ARCH_DRIVERS_TIMER_H__ - -#include -#include -#include - -#define ARCH_TIMER_COUNT 3 - -struct timer { - uint32_t id; - int irq; - int logical_irq; /* used for external timers */ - const char *irq_name; - void (*handler)(void *data); /* optional timer handler */ - void *data; /* optional timer handler's data */ - uint32_t hitime; /* high end of 64bit timer */ - uint32_t hitimeout; - uint32_t lowtimeout; - uint64_t delta; -}; - -/* internal API calls */ -int timer64_register(struct timer *timer, void (*handler)(void *arg), - void *arg); -void timer_64_handler(void *arg); - -static inline int arch_timer_register(struct timer *timer, - void (*handler)(void *arg), void *arg) -{ - uint32_t flags; - int ret; - - flags = arch_interrupt_global_disable(); - timer64_register(timer, handler, arg); - ret = arch_interrupt_register(timer->irq, timer_64_handler, timer); - arch_interrupt_global_enable(flags); - - - return ret; -} - -static inline void arch_timer_unregister(struct timer *timer) -{ - arch_interrupt_unregister(timer->irq); -} - -static inline void arch_timer_enable(struct timer *timer) -{ - arch_interrupt_enable_mask(1 << timer->irq); -} - -static inline void arch_timer_disable(struct timer *timer) -{ - arch_interrupt_disable_mask(1 << timer->irq); -} - -uint64_t arch_timer_get_system(struct timer *timer); - -int64_t arch_timer_set(struct timer *timer, uint64_t ticks); - -static inline void arch_timer_clear(struct timer *timer) -{ - arch_interrupt_clear(timer->irq); -} - -#endif /* __ARCH_DRIVERS_TIMER_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/drivers/timer.h" - -#endif /* __SOF_DRIVERS_TIMER_H__ */ diff --git a/src/arch/xtensa/include/arch/lib/cache.h b/src/arch/xtensa/include/arch/lib/cache.h deleted file mode 100644 index f3e54b7d28cb..000000000000 --- a/src/arch/xtensa/include/arch/lib/cache.h +++ /dev/null @@ -1,162 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifdef __SOF_LIB_CACHE_H__ - -#ifndef __ARCH_LIB_CACHE_H__ -#define __ARCH_LIB_CACHE_H__ - -#include - -#define DCACHE_LINE_SIZE XCHAL_DCACHE_LINESIZE - -#if !defined(__ASSEMBLER__) && !defined(LINKER) - -#include -#include -#include -#include - -#ifdef CONFIG_COMPILER_WORKAROUND_CACHE_ATTR -#include -#endif - -#define SRAM_UNCACHED_ALIAS 0x20000000 - -#ifdef CONFIG_IMX - -#ifdef CONFIG_COMPILER_WORKAROUND_CACHE_ATTR -/* - * We want to avoid buggy compiler optimization (function inlining). 
- * So we replace the call to glb_addr_attr() from glb_is_cached() - * with a function pointer that is initialized in - * src/arch/xtensa/driver/cache_attr.c - */ -#define is_cached(address) glb_is_cached(address) - -#else /* CONFIG_COMPILER_WORKAROUND_CACHE_ATTR */ -/* - * The _memmap_cacheattr_reset linker script variable has - * dedicate cache attribute for every 512M in 4GB space - * 1: write through - * 2: cache bypass - * 4: write back - * F: invalid access - */ -extern uint32_t _memmap_cacheattr_reset; - -/* - * Since each hex digit keeps the attributes for a 512MB region, - * we have the following address ranges: - * Address range - hex digit - * 0 - 1FFFFFFF - 0 - * 20000000 - 3FFFFFFF - 1 - * 40000000 - 5FFFFFFF - 2 - * 60000000 - 7FFFFFFF - 3 - * 80000000 - 9FFFFFFF - 4 - * A0000000 - BFFFFFFF - 5 - * C0000000 - DFFFFFFF - 6 - * E0000000 - FFFFFFFF - 7 - */ - -/* - * Based on the above information, get the address region id (0-7) - */ -#define _addr_range(address) (((uintptr_t)(address) >> 29) & 0x7) -/* - * Get the position of the cache attribute for a certain memory region. - * There are 4 bits per hex digit. - */ -#define _addr_shift(address) ((_addr_range(address)) << 2) -/* - * For the given address, get the corresponding hex digit - * from the linker script variable that contains the cache attributes - */ -#define _addr_attr(address) ((((uint32_t)(&_memmap_cacheattr_reset)) >> \ - (_addr_shift(address))) & 0xF) -/* - * Check if the address is cacheable or not, by verifying the _addr_attr, - * which for cacheable addresses might be 1 or 4 - */ -#define is_cached(address) ((_addr_attr(address) == 1) || \ - (_addr_attr(address) == 4)) -#endif /* CONFIG_COMPILER_WORKAROUND_CACHE_ATTR */ - -#elif defined(CONFIG_MEDIATEK) -#define is_cached(address) (1) -#else -#define is_cached(address) (!!((uintptr_t)(address) & SRAM_UNCACHED_ALIAS)) -#endif - -static inline void dcache_writeback_region(void __sparse_cache *addr, size_t size) -{ -#if XCHAL_DCACHE_SIZE > 0 - if (is_cached(addr)) - xthal_dcache_region_writeback((__sparse_force void *)addr, size); -#endif -} - -static inline void dcache_writeback_all(void) -{ -#if XCHAL_DCACHE_SIZE > 0 - xthal_dcache_all_writeback(); -#endif -} - -static inline void dcache_invalidate_region(void __sparse_cache *addr, size_t size) -{ -#if XCHAL_DCACHE_SIZE > 0 - if (is_cached(addr)) - xthal_dcache_region_invalidate((__sparse_force void *)addr, size); -#endif -} - -static inline void dcache_invalidate_all(void) -{ -#if XCHAL_DCACHE_SIZE > 0 - xthal_dcache_all_invalidate(); -#endif -} - -static inline void icache_invalidate_region(void *addr, size_t size) -{ -#if XCHAL_ICACHE_SIZE > 0 - xthal_icache_region_invalidate(addr, size); -#endif -} - -static inline void icache_invalidate_all(void) -{ -#if XCHAL_ICACHE_SIZE > 0 - xthal_icache_all_invalidate(); -#endif -} - -static inline void dcache_writeback_invalidate_region(void __sparse_cache *addr, size_t size) -{ -#if XCHAL_DCACHE_SIZE > 0 - if (is_cached(addr)) - xthal_dcache_region_writeback_inv((__sparse_force void *)addr, size); -#endif -} - -static inline void dcache_writeback_invalidate_all(void) -{ -#if XCHAL_DCACHE_SIZE > 0 - xthal_dcache_all_writeback_inv(); -#endif -} - -#endif /* !defined(__ASSEMBLER__) && !defined(LINKER) */ - -#endif /* __ARCH_LIB_CACHE_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/lib/cache.h" - -#endif /* __SOF_LIB_CACHE_H__ */ diff --git a/src/arch/xtensa/include/arch/lib/cpu.h b/src/arch/xtensa/include/arch/lib/cpu.h deleted file mode 
100644 index acd89a114f08..000000000000 --- a/src/arch/xtensa/include/arch/lib/cpu.h +++ /dev/null @@ -1,146 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Rander Wang - */ - -#ifdef __SOF_LIB_CPU_H__ - -#ifndef __ARCH_LIB_CPU_H__ -#define __ARCH_LIB_CPU_H__ - -#include -#include - -#if CONFIG_MULTICORE - -/** \brief CPU power down available flags */ -#define CPU_POWER_DOWN_MEMORY_ON BIT(0) /**< Power down core with memory - * enabled (required in d0ix - * flow) - */ - -void cpu_power_down_core(uint32_t flags); - -void cpu_alloc_core_context(int id); - -int arch_cpu_enable_core(int id); - -void arch_cpu_disable_core(int id); - -int arch_cpu_is_core_enabled(int id); - -int arch_cpu_enabled_cores(void); - -int arch_cpu_restore_secondary_cores(void); - -int arch_cpu_secondary_cores_prepare_d0ix(void); - -#else - -static inline int arch_cpu_enable_core(int id) { return 0; } - -static inline void arch_cpu_disable_core(int id) { } - -static inline int arch_cpu_is_core_enabled(int id) { return 1; } - -static inline int arch_cpu_enabled_cores(void) { return 1; } - -static inline int arch_cpu_restore_secondary_cores(void) {return 0; } - -static inline int arch_cpu_secondary_cores_prepare_d0ix(void) {return 0; } - -#endif - -static inline int arch_cpu_get_id(void) -{ - int prid; -#if XCHAL_HAVE_PRID - __asm__("rsr.prid %0" : "=a"(prid)); -#else - prid = PLATFORM_PRIMARY_CORE_ID; -#endif - return prid; -} - -#if !XCHAL_HAVE_THREADPTR -extern unsigned int _virtual_thread_start; -static unsigned int *virtual_thread_ptr = - (unsigned int *)&_virtual_thread_start; -#endif - -static inline void cpu_write_threadptr(int threadptr) -{ -#if XCHAL_HAVE_THREADPTR - __asm__ __volatile__( - "wur.threadptr %0" : : "a" (threadptr) : "memory"); -#else - *virtual_thread_ptr = threadptr; -#endif -} - -static inline int cpu_read_threadptr(void) -{ - int threadptr; -#if XCHAL_HAVE_THREADPTR - __asm__ __volatile__( - "rur.threadptr %0" : "=a"(threadptr)); -#else - threadptr = *virtual_thread_ptr; -#endif - return threadptr; -} - -static inline int cpu_read_vecbase(void) -{ - int vecbase; - - __asm__ __volatile__("rsr.vecbase %0" - : "=a"(vecbase)); - return vecbase; -} - -static inline int cpu_read_excsave2(void) -{ - int excsave2; - - __asm__ __volatile__("rsr.excsave2 %0" - : "=a"(excsave2)); - return excsave2; -} - -static inline int cpu_read_excsave3(void) -{ - int excsave3; - - __asm__ __volatile__("rsr.excsave3 %0" - : "=a"(excsave3)); - return excsave3; -} - -static inline int cpu_read_excsave4(void) -{ - int excsave4; - - __asm__ __volatile__("rsr.excsave4 %0" - : "=a"(excsave4)); - return excsave4; -} - -static inline int cpu_read_excsave5(void) -{ - int excsave5; - - __asm__ __volatile__("rsr.excsave5 %0" - : "=a"(excsave5)); - return excsave5; -} - -#endif /* __ARCH_LIB_CPU_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/lib/cpu.h" - -#endif /* __SOF_LIB_CPU_H__ */ diff --git a/src/arch/xtensa/include/arch/lib/wait.h b/src/arch/xtensa/include/arch/lib/wait.h deleted file mode 100644 index d4166f0a855c..000000000000 --- a/src/arch/xtensa/include/arch/lib/wait.h +++ /dev/null @@ -1,62 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. 
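cpu_write_threadptr() and cpu_read_threadptr() above are the primitives that let each core stash a pointer to its own context and fetch it back later (the idc_get() helper earlier in this patch reads the same register). A rough sketch of that pairing, with a hypothetical per_core_data type standing in for the real core context structure:

	struct per_core_data {		/* hypothetical stand-in, not the real core_context */
		int core_id;
		void *scratch;
	};

	static struct per_core_data this_core_data;

	static void example_install_core_data(void)
	{
		this_core_data.core_id = arch_cpu_get_id();	/* rsr.prid where available */
		cpu_write_threadptr((int)&this_core_data);	/* wur.threadptr or the virtual fallback */
	}

	static struct per_core_data *example_core_data(void)
	{
		return (struct per_core_data *)cpu_read_threadptr();
	}
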
- * - * Author: Liam Girdwood - */ - -#ifndef __ARCH_LIB_WAIT_H__ -#define __ARCH_LIB_WAIT_H__ - -#include -#include -#include -#include - -#include - -#if (CONFIG_XT_WAITI_DELAY) - -static inline void arch_wait_for_interrupt(int level) -{ - int i; - - /* need to make sure the interrupt level won't be lowered */ - if (level) - sof_panic(SOF_IPC_PANIC_WFI); - - /* this sequence must be atomic on LX6 */ - XTOS_SET_INTLEVEL(5); - - /* LX6 needs a delay */ - for (i = 0; i < 128; i++) - asm volatile("nop"); - - /* and to flush all loads/stores prior to wait */ - asm volatile("isync"); - asm volatile("extw"); - - /* now wait */ - asm volatile("waiti 0"); -} - -#else - -static inline void arch_wait_for_interrupt(int level) -{ - /* need to make sure the interrupt level won't be lowered */ - if (level) - sof_panic(SOF_IPC_PANIC_WFI); - - asm volatile("waiti 0"); -} - -#endif - -static inline void idelay(int n) -{ - while (n--) - asm volatile("nop"); -} - -#endif /* __ARCH_LIB_WAIT_H__ */ diff --git a/src/arch/xtensa/include/arch/schedule/task.h b/src/arch/xtensa/include/arch/schedule/task.h deleted file mode 100644 index a4c17d07725f..000000000000 --- a/src/arch/xtensa/include/arch/schedule/task.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - * Tomasz Lauda - */ - -/** - * \file arch/xtensa/include/arch/schedule/task.h - * \brief Arch task header file - * \authors Liam Girdwood - * Tomasz Lauda - */ - -#ifdef __XTOS_RTOS_TASK_H__ - -#ifndef __ARCH_SCHEDULE_TASK_H__ -#define __ARCH_SCHEDULE_TASK_H__ - -/** - * \brief Returns main task data. - * \return Pointer to pointer of main task data. - */ -struct task **task_main_get(void); - -/** - * \brief Returns current system context. - */ -volatile void *task_context_get(void); - -/** - * \brief Switches system context. - * \param[in,out] task_ctx Task context to be set. - */ -void task_context_set(void *task_ctx); - -/** - * \brief Allocates task context. - * \param[in,out] task_ctx Assigned to allocated structure on return. - */ -int task_context_alloc(void **task_ctx); - -/** - * \brief Initializes task context. - * \param[in,out] task_ctx Task context to be initialized. - * \param[in] entry Entry point for task execution. - * \param[in] arg0 First argument to be passed to entry function. - * \param[in] arg1 Second argument to be passed to entry function. - * \param[in] task_core Id of the core that task will be executed on. - * \param[in] stack Address of the stack, if NULL then allocated internally. - * \param[in] stack_size Size of the stack, ignored if stack is NULL. - */ -int task_context_init(void *task_ctx, void *entry, void *arg0, void *arg1, - int task_core, void *stack, int stack_size); - -/** - * \brief Frees task context. - * \param[in,out] task_ctx Task with context to be freed. - */ -void task_context_free(void *task_ctx); - -/** - * \brief Performs cache operation on task's context. - * \param[in,out] task_ctx Context to be wtb/inv. - * \param[in] cmd Cache operation to be performed. 
- */ -void task_context_cache(void *task_ctx, int cmd); - -#endif /* __ARCH_SCHEDULE_TASK_H__ */ - -#else - -#error "This file shouldn't be included from outside of XTOS's rtos/task.h" - -#endif /* __XTOS_RTOS_TASK_H__ */ diff --git a/src/arch/xtensa/include/arch/sof.h b/src/arch/xtensa/include/arch/sof.h deleted file mode 100644 index 622b1e417eff..000000000000 --- a/src/arch/xtensa/include/arch/sof.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - * Janusz Jankowski - */ - -#ifdef __XTOS_RTOS_SOF_H__ - -#ifndef __ARCH_SOF_H__ -#define __ARCH_SOF_H__ - -/* entry point to main firmware */ -void _ResetVector(void); - -void boot_primary_core(void); - -#endif /* __ARCH_SOF_H__ */ - -#else - -#error "This file shouldn't be included from outside of XTOS's rtos/sof.h" - -#endif /* __XTOS_RTOS_SOF_H__ */ diff --git a/src/arch/xtensa/include/arch/spinlock.h b/src/arch/xtensa/include/arch/spinlock.h deleted file mode 100644 index ee6cfc4670a1..000000000000 --- a/src/arch/xtensa/include/arch/spinlock.h +++ /dev/null @@ -1,132 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifdef __XTOS_RTOS_SPINLOCK_H__ - -#ifndef __ARCH_SPINLOCK_H__ -#define __ARCH_SPINLOCK_H__ - -#include -#include - -struct k_spinlock { - volatile uint32_t lock; -#if CONFIG_DEBUG_LOCKS - uint32_t user; -#endif -}; - -static inline void arch_spinlock_init(struct k_spinlock *lock) -{ - lock->lock = 0; -} - -#if XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ - -static inline void arch_spin_lock(struct k_spinlock *lock) -{ - uint32_t result; - - __asm__ __volatile__( - " movi %0, 0\n" - " l32ex %0, %1\n" - "1: movi %0, 1\n" - " s32ex %0, %1\n" - " getex %0\n" - " bnez %0, 1b\n" - : "=&a" (result) - : "a" (&lock->lock) - : "memory"); -} - -#elif XCHAL_HAVE_S32C1I - -static inline void arch_spin_lock(struct k_spinlock *lock) -{ - uint32_t result; - - /* TODO: Should be platform specific, since on SMP platforms - * without uncached memory region we'll need additional cache - * invalidations in a loop - */ - __asm__ __volatile__( - " movi %0, 0\n" - " wsr %0, scompare1\n" - "1: movi %0, 1\n" - " s32c1i %0, %1, 0\n" - " bnez %0, 1b\n" - : "=&a" (result) - : "a" (&lock->lock) - : "memory"); -} - -#else - -#if CONFIG_CORE_COUNT > 1 - -#error No atomic ISA for SMP configuration - -#endif /* CONFIG_CORE_COUNT > 1 */ - -/* - * The ISA has no atomic operations so use integer arithmetic on uniprocessor systems. - * This helps support GCC and qemu emulation of certain targets. - */ -static inline void arch_spin_lock(struct k_spinlock *lock) -{ - uint32_t result; - - do { - if (lock->lock == 0) { - lock->lock = 1; - result = 1; - } - } while (!result); -} - -#endif /* XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ */ - -#if XCHAL_HAVE_EXCLUSIVE || XCHAL_HAVE_S32C1I - -static inline void arch_spin_unlock(struct k_spinlock *lock) -{ - uint32_t result; - - __asm__ __volatile__( - " movi %0, 0\n" - " s32ri %0, %1, 0\n" - : "=&a" (result) - : "a" (&lock->lock) - : "memory"); -} - -#else - -#if CONFIG_CORE_COUNT > 1 - -#error No atomic ISA for SMP configuration - -#endif /* CONFIG_CORE_COUNT > 1 */ - -/* - * The ISA has no atomic operations so use integer arithmetic on uniprocessor systems. - * This helps support GCC and qemu emulation of certain targets. 
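Whichever arch_spin_lock() variant the configuration selects above (l32ex/s32ex, s32c1i, or the uniprocessor fallback), the calling pattern is the same. A minimal usage sketch, assuming only the primitives declared in this header; the lock object and counter are illustrative:

	static struct k_spinlock counter_lock;	/* zero-initialised; arch_spinlock_init() also clears it */
	static int shared_counter;

	static void counter_add(int n)
	{
		arch_spin_lock(&counter_lock);		/* spins until the lock word is claimed */
		shared_counter += n;			/* protected section */
		arch_spin_unlock(&counter_lock);	/* releasing store (s32ri or plain) */
	}
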
- */ -static inline void arch_spin_unlock(struct k_spinlock *lock) -{ - lock->lock = 0; -} - -#endif /* XCHAL_HAVE_EXCLUSIVE || XCHAL_HAVE_S32C1I */ - -#endif /* __ARCH_SPINLOCK_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/spinlock.h" - -#endif /* __SOF_SPINLOCK_H__ */ diff --git a/src/arch/xtensa/include/arch/string.h b/src/arch/xtensa/include/arch/string.h deleted file mode 100644 index 7832337f2059..000000000000 --- a/src/arch/xtensa/include/arch/string.h +++ /dev/null @@ -1,80 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifdef __XTOS_RTOS_STRING_H__ - -#ifndef __ARCH_STRING_H__ -#define __ARCH_STRING_H__ - -#include -#include -#include -#include - -#define arch_memcpy(dest, src, size) \ - xthal_memcpy(dest, src, size) - -#define bzero(ptr, size) \ - memset_s(ptr, size, 0, size) - -void *xthal_memcpy(void *dst, const void *src, size_t len); - -int memset_s(void *dest, size_t dest_size, - int data, size_t count); -int memcpy_s(void *dest, size_t dest_size, - const void *src, size_t count); - -void *__vec_memcpy(void *dst, const void *src, size_t len); -void *__vec_memset(void *dest, int data, size_t src_size); - -static inline int arch_memcpy_s(void *dest, size_t dest_size, - const void *src, size_t count) -{ - if (!dest || !src) - return -EINVAL; - - if ((dest >= src && (char *)dest < ((char *)src + count)) || - (src >= dest && (char *)src < ((char *)dest + dest_size))) - return -EINVAL; - - if (count > dest_size) - return -EINVAL; - -#if __XCC__ && XCHAL_HAVE_HIFI3 && !CONFIG_LIBRARY - __vec_memcpy(dest, src, count); -#else - memcpy(dest, src, count); -#endif - - return 0; -} - -static inline int arch_memset_s(void *dest, size_t dest_size, - int data, size_t count) -{ - if (!dest) - return -EINVAL; - - if (count > dest_size) - return -EINVAL; - -#if __XCC__ && XCHAL_HAVE_HIFI3 && !CONFIG_LIBRARY - if (!__vec_memset(dest, data, count)) - return -ENOMEM; -#else - memset(dest, data, count); -#endif - return 0; -} - -#endif /* __ARCH_STRING_H__ */ - -#else - -#error "This file shouldn't be included from outside of sof/string.h" - -#endif /* __SOF_STRING_H__ */ diff --git a/src/arch/xtensa/include/xtensa/board.h b/src/arch/xtensa/include/xtensa/board.h deleted file mode 100644 index c6b04a250575..000000000000 --- a/src/arch/xtensa/include/xtensa/board.h +++ /dev/null @@ -1,28 +0,0 @@ -/* This header is supposed to be obtained from /xtensa/board.h - using a -I directive passed to the compiler. */ - -#error "Unspecified board. Missing -I directive to select supported Xtensa board, usually -I XTENSA_TOOLS_ROOT/xtensa-elf/include/xtensa/ (XTENSA_TOOLS_ROOT is root of Xtensa Tools install, see xt-run --show-config=xttools)" - -/* - * Copyright (c) 2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
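arch_memcpy_s() above validates its arguments before copying: NULL pointers, overlapping source/destination windows, and a count larger than the destination all fail with -EINVAL. A small sketch of the expected return values, using hypothetical buffers:

	static char dst[16];
	static const char src[8] = "example";

	static void example_safe_copy(void)
	{
		int ok      = arch_memcpy_s(dst, sizeof(dst), src, sizeof(src));	/* 0: fits, no overlap */
		int toobig  = arch_memcpy_s(dst, 4, src, sizeof(src));			/* -EINVAL: count > dest_size */
		int overlap = arch_memcpy_s(dst, sizeof(dst), dst + 1, 4);		/* -EINVAL: regions overlap */

		(void)ok; (void)toobig; (void)overlap;
	}
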
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - diff --git a/src/arch/xtensa/include/xtensa/c6x-compat.h b/src/arch/xtensa/include/xtensa/c6x-compat.h deleted file mode 100755 index ca91bd718397..000000000000 --- a/src/arch/xtensa/include/xtensa/c6x-compat.h +++ /dev/null @@ -1,1758 +0,0 @@ -/* - * Copyright (c) 2006-2010 Tensilica Inc. ALL RIGHTS RESERVED. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ - -#ifndef __C6X_COMPAT__H -#define __C6X_COMPAT__H - -/* Unimplemented functions _gmpy, _gmpy4, _xormpy, _lssub, _cmpy, _cmpyr, - _cmpyr1, _ddotpl2r, _ddotph2r */ - - -typedef long long C6X_COMPAT_LONG40; - - -#define _memd8(a) (*((double*)(a))) -#define _memd8_const(a) (*((const double*)(a))) - -#define _amemd8(a) (*((double*)(a))) -#define _amemd8_const(a) (*((const double*)(a))) - -#define _mem8(a) (*((unsigned long long*)(a))) -#define _mem8_const(a) (*((const unsigned long long*)(a))) - -#define _mem4(a) (*((unsigned*)(a))) -#define _mem4_const(a) (*((const unsigned*)(a))) -#define _amem4_const(a) (*((const unsigned*)(a))) - -/* NOTE: To emulate a C6X properly you should define global variables - for your Xtensa with these names. Some of the emulation routines - will set these values. */ - -extern int _carry; -extern int _overflow; - -// Utility routines - - -#define TESTBIT(x,n) (((x) >> (n)) & 1) - -#define NSA_BITS 32 - -static inline unsigned int norm_shift_amt_U_and_non_U(int is_signed, int inp) { -int j=0, k=0; -int x=inp; -if (is_signed) { - /* Invert signed val if negative */ - x= TESTBIT(x,(NSA_BITS-1))? 
~x: x; - x= (x&1)|(x<<1); /* Shift up to return count-1 */ - if (x ==0) - return NSA_BITS-1; - } - if (x ==0) - return NSA_BITS; - /* Now count leading zeros */ - for (j=0, k=NSA_BITS-1; k>=0; j++, k--) { - if (TESTBIT(x,k)) - return j; - } - return NSA_BITS; -} - - - -static inline long long -orig_L40_set( long long L40_var1) { - long long L40_var_out; - - L40_var_out = L40_var1 & 0x000000ffffffffffLL; - - if( L40_var1 & 0x8000000000LL) - L40_var_out = L40_var_out | 0xffffff0000000000LL; - - return( L40_var_out); -} - - - -static inline signed long long -util_saturate_n_no_state(signed long long t, int n) -{ - signed long long maxv, minv; - maxv = (1LL << (n-1)) - 1; - minv = (-1LL << (n-1)); - if (t > maxv) { - t = maxv; - } else if (t < minv) { - t = minv; - } - return t; -} - - -static inline signed long long -util_saturate_n_sgn(signed long long t, int n) -{ - signed long long result; - signed long long maxv, minv; - maxv = (1LL << (n-1)) - 1; - minv = (-1LL << (n-1)); - if (t > 0) { - result = maxv; - _overflow = 1; - } else if (t < 0) { - result = minv; - _overflow = 1; - } else { - result = 0; - } - return result; -} - - - - -/* well-behaved signed shift right (left on negative) with - saturation */ -static inline signed long long -util_shift_right_saturate_n(signed long long t, int shval, int n) -{ - /* n should be <= 62 */ - long long result; - - signed long long mask; - int actual_shift = shval; - long long shft = actual_shift > 0 ? actual_shift : -actual_shift; - - if (t == 0 || actual_shift == 0) - return t; - - if (actual_shift >= n) { - return (t < 0) ? -1 : 0; - } - if (actual_shift <= -n) { - return util_saturate_n_sgn(t, n); - } - if (actual_shift > 0) { - return t >> actual_shift; - } - /* actual_shift < 0. Check for saturation after shift. 
*/ - mask = (-1LL << (n-shft-1)); - if (t > 0 && ((mask & t) != 0)) { - return util_saturate_n_sgn(t, n); - } - if (t < 0 && ((mask & t) != mask)) { - return util_saturate_n_sgn(t, n); - } - result = t << shft; - - return result; -} - - -/* Implemented c6x standard C compatibility functions (alphabetical - order) */ - - -static inline int _abs(int src1) { - if ((unsigned) src1 == (unsigned) 0x80000000) { - return 0x7fffffff; - } - return abs(src1); -} - - -static inline int _abs2(int src1) { - short s1[2],r[2]; - int result; - *((int*)s1) = src1; - if ((unsigned short) s1[1] == (unsigned short) 0x8000) r[1] = 0x7fff; - else r[1] = abs(s1[1]); - if ((unsigned short) s1[0] == (unsigned short) 0x8000) r[0] = 0x7fff; - else r[0] = abs(s1[0]); - result = *(int*)r; - return result; - } - - - - -static inline int _add2(int src1, int src2) { - short s1[2], s2[2], r[2]; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r[0] = s1[0] + s2[0]; - r[1] = s1[1] + s2[1]; - result = *(int*)r; - return result; -} - -static inline int _add4(int src1, int src2) { - char c1[4], c2[4], r[4]; - int result; - *((int*)c1) = src1; - *((int*)c2) = src2; - r[0] = c1[0] + c2[0]; - r[1] = c1[1] + c2[1]; - r[2] = c1[2] + c2[2]; - r[3] = c1[3] + c2[3]; - result = *(int*)r; - return result; -} - - - -static inline long long _addsub(unsigned int src1, unsigned int src2) -{ - - int res_lo; - int res_hi; - - res_hi = src1+src2; - res_lo = src1-src2; - return (((unsigned long long) res_hi) << 32) | ((unsigned int) res_lo) ; -} - - -static inline long long _addsub2(unsigned int src1, unsigned int src2) -{ - short s1[2], s2[2], ra[2], rs[2]; - int res_lo; - int res_hi; - - *((int*)s1) = src1; - *((int*)s2) = src2; - ra[0] = s1[0] + s2[0]; - ra[1] = s1[1] + s2[1]; - rs[0] = s1[0] - s2[0]; - rs[1] = s1[1] - s2[1]; - - res_hi = *(int*)ra; - res_lo = *(int*)rs; - return (((unsigned long long) res_hi) << 32) | ((unsigned int) res_lo) ; -} - - -static inline int _avg2(int src1, int src2) { - int low = (((int)1 + (short) src1 + (short) src2) >> 1) & 0XFFFF; - int high1 = src1 >> 16; - int high2 = src2 >> 16; - int high = ((high1 + high2 + 1) >> 1)<< 16; - return high | low; -} - - - -static inline unsigned int _avgu4(unsigned int src1, unsigned int src2) { -unsigned int res0 = ((src1 & 0xFF) + (src2 & 0xFF) + 1) >> 1; - unsigned int res1 = (((src1 & 0xFF00) >> 8) + ((src2 & 0xFF00) >> 8) + 1) >> 1; - unsigned int res2 = (((src1 & 0xFF0000) >> 16) + ((src2 & 0xFF0000) >> 16) + 1) >> 1; - unsigned int res3 = (((src1 & 0xFF000000) >> 24) + ((src2 & 0xFF000000) >> 24) + 1) >> 1; - return (res3 << 24) | (res2 << 16) | (res1 << 8) | res0; -} - - -static inline int TEN_popc (unsigned char b) -{ - int i, result = 0; - for (i = 0; i < 8; i++){ - if (b & 0x1) - result++; - b >>= 1; - } - return result; -} - -static inline unsigned int _bitc4(unsigned int src1) -{ - unsigned int res0 = TEN_popc(src1 & 0xFF); - unsigned int res1 = TEN_popc((src1 & 0xFF00) >> 8); - unsigned int res2 = TEN_popc((src1 & 0xFF0000) >> 16); - unsigned int res3 = TEN_popc((src1 & 0xFF000000) >> 24); - return (res3 << 24) | (res2 << 16) | (res1 << 8) | res0; -} - -static inline unsigned int _bitr(unsigned int src) { - int i; - unsigned r = 0; - for (i = 0; i< 32; ++i) { - r = r | (((src >> i) & 1)<<(31-i)); - } - return r; -} - - -static inline unsigned int _clr(unsigned int src2, int csta, int cstb) -{ - csta &= 0x1f; - cstb &= 0x1f; - if (csta > cstb) - return src2; - else { - unsigned int mask = (((1 << (cstb - csta)) << 1) - 1) << csta; - return src2 & 
(~mask); - } -} - -static inline unsigned int _clrr(unsigned int src2, int src1) -{ - unsigned int csta = (src1 >> 5) & 0x1f; - unsigned int cstb = src1 & 0x1f; - if (csta > cstb) - return src2; - else { - unsigned int mask = (((1 << (cstb - csta)) << 1) - 1) << csta; - return src2 & (~mask); - } -} - - - - -static inline int _cmpeq2(int src1, int src2) { - short s1[2], s2[2]; - int r0, r1; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r0 = s1[0] == s2[0] ? 1 : 0; - r1 = s1[1] == s2[1] ? 1 : 0; - result = (r1 << 1) | r0; - return result; -} - -static inline int _cmpeq4(int src1, int src2) { - char s1[4], s2[4]; - int r0, r1, r2, r3; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r0 = s1[0] == s2[0] ? 1 : 0; - r1 = s1[1] == s2[1] ? 1 : 0; - r2 = s1[2] == s2[2] ? 1 : 0; - r3 = s1[3] == s2[3] ? 1 : 0; - result = (r3 << 3) | (r2 << 2) | (r1 << 1) | r0; - return result; -} - - -static inline int _cmpgt2(int src1, int src2) { - short s1[2], s2[2]; - int r1, r0; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r0 = s1[0] > s2[0] ? 1 : 0; - r1 = s1[1] > s2[1] ? 1 : 0; - result = (r1<<1) | r0; - return result; -} - - -static inline unsigned int _cmpgtu4(unsigned int src1, unsigned int src2) { - unsigned int s1_0 = (src1 & 0xFF); - unsigned int s1_1 = (src1 & 0xFF00) >> 8; - unsigned int s1_2 = (src1 & 0xFF0000) >> 16; - unsigned int s1_3 = (src1 & 0xFF000000) >> 24; - - unsigned int s2_0 = (src2 & 0xFF); - unsigned int s2_1 = (src2 & 0xFF00) >> 8; - unsigned int s2_2 = (src2 & 0xFF0000) >> 16; - unsigned int s2_3 = (src2 & 0xFF000000) >> 24; - - unsigned int result = 0; - - if (s1_0 > s2_0) - result |= 0x1; - - if (s1_1 > s2_1) - result |= 0x2; - - if (s1_2 > s2_2) - result |= 0x4; - - if (s1_3 > s2_3) - result |= 0x8; - - return result; -} - - - - -static inline long long _ddotp4(unsigned int src1, unsigned int src2) { - unsigned int res0, res1; - short s1_0 = (src1 & 0xffff); - short s1_1 = (src1 & 0xfff0000) >> 16; - - unsigned short s2_0 = (src2 & 0xff); - unsigned short s2_1 = (src2 & 0xff00) >> 8; - unsigned short s2_2 = (src2 & 0xff0000) >> 16; - unsigned short s2_3 = (src2 & 0xff000000) >> 24; - - res0 = ((int)s1_0) * s2_0 + ((int)s1_1) * s2_1; - res1 = ((int)s1_0) * s2_2 + ((int)s1_1) * s2_3; - - return (res1 << 16) | res0; -} - - -static inline long long _ddotph2(long long src1_o_src1_e, unsigned int src2) -{ - - unsigned int src1_o = src1_o_src1_e >> 32; - unsigned int src1_e = src1_o_src1_e & 0xFFFFFFFF; - short ls1_o = src1_o & 0XFFFF; - short hs1_o = src1_o >> 16; -// short ls1_e = src1_e & 0XFFFF; - short hs1_e = src1_e >> 16; - short ls2 = src2 & 0XFFFF; - short hs2 = src2 >> 16; - - unsigned long long res_hi = ls2 * ls1_o + hs2 * hs1_o; - unsigned int res_lo = ls1_o * hs2 + hs1_e * ls2; - return (res_hi << 32) | res_lo; -} - - -static inline long long _ddotpl2(long long src1_o_src1_e, unsigned int src2) -{ - unsigned int src1_o = src1_o_src1_e >> 32; - unsigned int src1_e = src1_o_src1_e & 0xFFFFFFFF; - short ls1_o = src1_o & 0XFFFF; -// short hs1_o = src1_o >> 16; - short ls1_e = src1_e & 0XFFFF; - short hs1_e = src1_e >> 16; - short ls2 = src2 & 0XFFFF; - short hs2 = src2 >> 16; - - unsigned long long res_hi = ls2 * hs1_e + hs2 * ls1_o; - unsigned res_lo = hs1_e * hs2 + ls1_e * ls2; - return (res_hi << 32) | res_lo; -} - - -static inline unsigned int _deal(unsigned int src) -{ - int i; - unsigned short lo = 0, hi = 0; - for (i = 0; i < 32; i+= 2) { - lo >>= 1; - lo |= (src & 0x1) << 15; - src >>= 1; - hi >>= 1; - hi |= (src & 0x1) << 15; - 
src >>= 1; - } - return (hi << 16) | lo; -} - - -static inline long long _dmv(unsigned int src1, unsigned int src2) -{ - return (((long long) src1) << 32) | src2; -} - - -static inline int _dotpn2(int src1, int src2) { -short int s1_h = src1>>16; - short int s1_l = src1; - short int s2_h = src2>>16; - short int s2_l = src2; - return s1_h * s2_h - s1_l * s2_l; -} - - -static inline int _dotp2(int src1, int src2) { - short int s1_h = src1>>16; - short int s1_l = src1; - short int s2_h = src2>>16; - short int s2_l = src2; - return s1_h * s2_h + s1_l * s2_l; -} - - - -static inline int _dotpnrsu2(int src1, unsigned int src2) -{ - short ls1 = src1 & 0XFFFF; - unsigned short ls2 = src2 & 0XFFFF; - short hs1 = src1 >> 16; - unsigned short hs2 = src2 >> 16; - - int result = (((long long) (int)(hs1 * hs2)) - ((long long) (int)(ls1 * ls2)) + (1 << 15)) >> 16; - return result; -} - - - -static inline int _dotprsu2(int src1, unsigned int src2) { - short ls1 = src1 & 0XFFFF; - unsigned short ls2 = (src2 & 0XFFFF); - short hs1 = src1 >> 16; - unsigned short hs2 = (src2 >> 16); - - int result = (((long long) (int) (ls1 * ls2)) + ((long long) (int) (hs1 * hs2)) + (1LL << 15)) >> 16; - return result; -} - - - - - - - -static inline int _dotpsu4(int src1, unsigned int src2) { - int result; - signed char s1_0 = (src1 & 0xff); - signed char s1_1 = (src1 & 0xff00) >> 8; - signed char s1_2 = (src1 & 0xff0000) >> 16; - signed char s1_3 = (src1 & 0xff000000) >> 24; - - unsigned int s2_0 = (src2 & 0xff); - unsigned int s2_1 = (src2 & 0xff00) >> 8; - unsigned int s2_2 = (src2 & 0xff0000) >> 16; - unsigned int s2_3 = (src2 & 0xff000000) >> 24; - - result = s1_0 * s2_0 + s1_1 * s2_1 + s1_2 * s2_2 + s1_3 * s2_3; - return result; -} - - -static inline unsigned int _dotpu4(unsigned int src1, unsigned int src2) { - unsigned char v1_0 = src1 & 0xff; - unsigned char v1_1 = (src1>>8) & 0xff; - unsigned char v1_2 = (src1>>16) & 0xff; - unsigned char v1_3 = (src1>>24) & 0xff; - - unsigned char v2_0 = src2 & 0xff; - unsigned char v2_1 = (src2>>8) & 0xff; - unsigned char v2_2 = (src2>>16) & 0xff; - unsigned char v2_3 = (src2>>24) & 0xff; - - unsigned v = v1_0 * v2_0 + v1_1 * v2_1 + v1_2 * v2_2 + v1_3 * v2_3; - return v; -} - - -static inline long long _dpack2(unsigned int src1, unsigned int src2){ -unsigned short s1[2], s2[2]; -*((int*)s1) = src1; -*((int*)s2) = src2; -return ((unsigned long long) s1[1] << 48) | ((unsigned long long) s2[1] << 32) | ((unsigned long long) s1[0] << 16) | ((unsigned long long) s2[0]); -} - - -static inline long long _dpackx2(unsigned int src1, unsigned int src2){ -unsigned short s1[2], s2[2]; -*((int*)s1) = src1; -*((int*)s2) = src2; -return ((unsigned long long) s2[0] << 48) | ((unsigned long long) s1[1] << 32) | ((unsigned long long) s1[0] << 16) | ((unsigned long long) s2[1]); -} - -static inline int _ext(int src2, unsigned int csta, unsigned int cstb) -{ - return (src2 << csta) >> cstb; -} - -static inline int _extr(int src2, int src1) -{ - unsigned int csta = (src1 >> 5) & 0x1f; - unsigned int cstb = src1 & 0x1f; - return (src2 << csta) >> cstb; -} - -static inline unsigned int _extu(unsigned int src2, unsigned int csta, unsigned int cstb) -{ - return (src2 << csta) >> cstb; -} - -static inline unsigned int _extur(unsigned int src2, int src1) -{ - unsigned int csta = (src1 >> 5) & 0x1f; - unsigned int cstb = src1 & 0x1f; - return (src2 << csta) >> cstb; -} - - -static inline unsigned long long _hi(double src) { - unsigned long long v; - *(double*)&v = src; - return v>>32; -} - -static inline 
unsigned int _hill (long long src) -{ - return (unsigned int) (src >> 32); -} - - - -static inline double _itod(unsigned hi, unsigned lo) { - double v; - unsigned long long ll = ((((unsigned long long)(hi))<<32) | (unsigned long long)((unsigned)lo)); - *((unsigned long long *)&v) = ll; - return v; -} - - -static inline long long _itoll(unsigned int src2, unsigned int src1) -{ - return (((long long) src2) << 32) | src1; -} - - -static inline C6X_COMPAT_LONG40 _labs(C6X_COMPAT_LONG40 src2) -{ - long long maxv = (1LL << (40 -1)) - 1; - long long minv = (-1LL << (40 - 1)); - C6X_COMPAT_LONG40 lres = orig_L40_set(src2); - - lres = lres < 0 ? -lres : lres; - if (lres > maxv) lres = maxv; - else if (lres < minv) lres = minv; - - return lres; -} - - -static inline C6X_COMPAT_LONG40 _ldotp2(int src1, int src2) { -return (C6X_COMPAT_LONG40) _dotp2(src1, src2); -} - - -static inline unsigned int _lmbd(unsigned int src1, unsigned int src2) -{ - return norm_shift_amt_U_and_non_U(0,(((int) (src1 << 31)) >> 31) ^ (~src2)); -} - - -static inline unsigned int _lnorm(C6X_COMPAT_LONG40 src2) { -if (src2 == 0) - return 39; - else { - int hi = (int)(src2 >> 32); - int lo = (int)src2; - - - long long temp = (unsigned long long)(unsigned)lo | (unsigned long long)hi << 32; - temp = orig_L40_set(temp); - - if (temp == 0) return 0; - int cnt = 0; - while (((temp >> 39) & 1) == ((temp >> 38) & 1)) { - temp <<= 1; - cnt++; - } - return cnt; - } -} - - -static inline unsigned long long _lo(double src) { - unsigned long long v; - *(double*)&v = src; - return v; -} - - -static inline unsigned int _loll (long long src) -{ - return (unsigned int) src; -} - - -static inline C6X_COMPAT_LONG40 _lsadd(int src1, C6X_COMPAT_LONG40 src2) -{ - long long maxv = (1LL << (40 -1)) - 1; - long long minv = (-1LL << (40 - 1)); - int hi = (int)(src2 >> 32); - int lo = (int)src2; - long long src2_int = (unsigned long long)(unsigned)lo | (unsigned long long)hi << 32; - - - long long src2_int2 = orig_L40_set(src2_int); - - long long res = src1 + src2_int2; - - if (res > maxv) { - res = maxv; - _overflow = 1; - } - else if (res < minv) { - res = minv; - _overflow = 1; - } - - long long res2 = orig_L40_set(res); - - res2 = (signed char)(res2 >> 32); - - C6X_COMPAT_LONG40 lres = (((C6X_COMPAT_LONG40) res2) << 32) | ((unsigned int)res); - return lres; -} - - - -static inline int _max2 (int src1, int src2) { - short s1[2], s2[2], r[2]; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r[0] = s1[0] > s2[0] ? s1[0] : s2[0]; - r[1] = s1[1] > s2[1] ? s1[1] : s2[1]; - result = *(int*)r; - return result; -} - - - - - - -static inline unsigned int _maxu4(unsigned int src1, unsigned int src2) { - unsigned int res0, res1, res2, res3; - unsigned int s1_0 = res0 = (src1 & 0xFF); - unsigned int s1_1 = res1 = (src1 & 0xFF00) >> 8; - unsigned int s1_2 = res2 = (src1 & 0xFF0000) >> 16; - unsigned int s1_3 = res3 = (src1 & 0xFF000000) >> 24; - - unsigned int s2_0 = (src2 & 0xFF); - unsigned int s2_1 = (src2 & 0xFF00) >> 8; - unsigned int s2_2 = (src2 & 0xFF0000) >> 16; - unsigned int s2_3 = (src2 & 0xFF000000) >> 24; - -// unsigned int res = 0; - - if (s1_0 < s2_0) - res0 = s2_0; - - if (s1_1 < s2_1) - res1 = s2_1; - - if (s1_2 < s2_2) - res2 = s2_2; - - if (s1_3 < s2_3) - res3 = s2_3; - - return (res3 << 24) | (res2 << 16) | (res1 << 8) | res0; - - -} - -static inline int _min2(int src1, int src2) { - short s1[2], s2[2], r[2]; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r[0] = s1[0] < s2[0] ? s1[0] : s2[0]; - r[1] = s1[1] < s2[1] ? 
s1[1] : s2[1]; - result = *(int*)r; - return result; -} - - -static inline unsigned int _minu4(unsigned int src1, unsigned int src2) { -unsigned int res0, res1, res2, res3; - unsigned int s1_0 = res0 = (src1 & 0xFF); - unsigned int s1_1 = res1 = (src1 & 0xFF00) >> 8; - unsigned int s1_2 = res2 = (src1 & 0xFF0000) >> 16; - unsigned int s1_3 = res3 = (src1 & 0xFF000000) >> 24; - - unsigned int s2_0 = (src2 & 0xFF); - unsigned int s2_1 = (src2 & 0xFF00) >> 8; - unsigned int s2_2 = (src2 & 0xFF0000) >> 16; - unsigned int s2_3 = (src2 & 0xFF000000) >> 24; - -// unsigned int res = 0; - - if (s1_0 > s2_0) - res0 = s2_0; - - if (s1_1 > s2_1) - res1 = s2_1; - - if (s1_2 > s2_2) - res2 = s2_2; - - if (s1_3 > s2_3) - res3 = s2_3; - - return (res3 << 24) | (res2 << 16) | (res1 << 8) | res0; -} - - -static inline int _mpy(int src1, int src2) { -return (short) src1 * (short) src2; -} - - -static inline int _mpyh(int src1, int src2) { -return (short) (src1 >> 16) * (short) (src2 >> 16); -} - - -static inline long long _mpyhill (int src1, int src2) -{ - short s1 = src1 >> 16; - return ((long long) src2) * s1; -} - -static inline int _mpyhir(int src1, int src2) -{ - short s1 = src1 >> 16; - long long result = ((long long) src2) * s1 + (1 << 14); - result >>= 15; - return result; -} - - -static inline int _mpyhl(int src1, int src2) { -return (short) (src1 >> 16) * (short) (src2); -} - -static inline unsigned int _mpyhlu(unsigned int src1, unsigned int src2) { -return (unsigned short) (src1 >> 16) * (unsigned short) (src2); -} - -static inline int _mpyhslu(int src1, unsigned int src2) { -return (short) (src1 >> 16) * (unsigned short) src2; -} - - -static inline int _mpyhsu(int src1, unsigned int src2) { -return (short) (src1 >>16) * (unsigned short) (src2 >>16); -} - - -static inline unsigned int _mpyhu(unsigned int src1, unsigned int src2) { -return (unsigned short) (src1 >>16) * (unsigned short) (src2 >> 16); -} - - -static inline int _mpyhuls(unsigned int src1, int src2) { -return (unsigned short) (src1 >>16) * (signed short) (src2); -} - - -static inline int _mpyhus(unsigned int src1, int src2) { -return (unsigned short) (src1 >> 16) * (short) (src2 >>16); -} - - - -static inline long long _mpyidll (int src1, int src2) -{ - return (long long) src1 * src2; -} - - -static inline int _mpylh(int src1, int src2) { -return (signed short) (src1 & 0xffff) * (signed short) (src2 >> 16); -} - -static inline unsigned int _mpylhu(unsigned int src1, unsigned int src2) { -return (unsigned short) src1 * (unsigned short) (src2 >> 16); -} - - -static inline long long _mpylill (int src1, int src2) -{ - return ((long long) src2) * ((short)src1); -} - - - -static inline int _mpylir(int src1, int src2) -{ - short s1 = src1; - long long result = ((long long) src2) * s1 + (1 << 14); - result >>= 15; - return result; -} - - -static inline int _mpylshu(int src1, unsigned int src2) { -return (short) src1 * (unsigned short) (src2 >> 16); -} - - -static inline int _mpyluhs(unsigned int src1, int src2) { -return (unsigned short) src1 * (short) (src2 >> 16); -} - - - -static inline int _mpysu(int src1, unsigned int src2) { -return (short) src1 * (unsigned short) src2; -} - - - -static inline long long _mpysu4ll (int src1, unsigned int src2) { - unsigned short res0, res1, res2, res3; - signed char s1_0 = (src1 & 0xff); - signed char s1_1 = (src1 & 0xff00) >> 8; - signed char s1_2 = (src1 & 0xff0000) >> 16; - signed char s1_3 = (src1 & 0xff000000) >> 24; - - unsigned short s2_0 = (src2 & 0xff); - unsigned short s2_1 = (src2 & 0xff00) 
>> 8; - unsigned short s2_2 = (src2 & 0xff0000) >> 16; - unsigned short s2_3 = (src2 & 0xff000000) >> 24; - - res0 = s1_0 * s2_0; - res1 = s1_1 * s2_1; - res2 = s1_2 * s2_2; - res3 = s1_3 * s2_3; - - return (((unsigned long long) res3) << 48) - | (((unsigned long long) res2) << 32) - | (((unsigned long long) res1) << 16) - | res0; -} - -static inline unsigned int _mpyu(unsigned int src1, unsigned int src2) { - unsigned v = (unsigned short)src1 * (unsigned short)src2; - return v; -} - -static inline int _mpyus(unsigned int src1, int src2) { -return (unsigned short) src1 * (short) src2; -} - -static inline long long _mpyu4ll (unsigned int src1, unsigned int src2) { - unsigned short res0, res1, res2, res3; - unsigned char s1_0 = (src1 & 0xff); - unsigned char s1_1 = (src1 & 0xff00) >> 8; - unsigned char s1_2 = (src1 & 0xff0000) >> 16; - unsigned char s1_3 = (src1 & 0xff000000) >> 24; - - unsigned short s2_0 = (src2 & 0xff); - unsigned short s2_1 = (src2 & 0xff00) >> 8; - unsigned short s2_2 = (src2 & 0xff0000) >> 16; - unsigned short s2_3 = (src2 & 0xff000000) >> 24; - - res0 = s1_0 * s2_0; - res1 = s1_1 * s2_1; - res2 = s1_2 * s2_2; - res3 = s1_3 * s2_3; - - return (((unsigned long long) res3) << 48) - | (((unsigned long long) res2) << 32) - | (((unsigned long long) res1) << 16) - | res0; -} - - -static inline long long _mpy2ir(unsigned int src1, unsigned int src2) -{ - if ((src1 == 0x8000) && (src2 == 0x80000000)) { - _overflow = 1; - return 0; - } - else { - short ls1 = src1 & 0xffff; - short hs1 = src1 >> 16; - unsigned long long hi = (((long long) hs1) * (int) src2 + (1 << 14)) >> 15; - unsigned long long lo = ((((long long) ls1) * (int) src2 + (1 << 14)) >> 15) & 0xFFFFFFFF; - return (hi << 32) | lo; - } -} - - -static inline long long _mpy2ll (int src1, int src2) { - short ls1 = src1 & 0xffff; - short hs1 = src1 >> 16; - short ls2 = src2 & 0xffff; - short hs2 = src2 >> 16; - - unsigned long long hi = hs1 * hs2; - unsigned long long lo = (ls1 * ls2) & 0xFFFFFFFF; - - return (hi << 32) | lo; - -} - - -static inline int _mpy32(int src1, int src2) -{ - return src1 * src2; -} - - -static inline long long _mpy32ll(int src1, int src2) -{ - return ((long long) src1) * src2; -} - -static inline long long _mpy32su(int src1, unsigned int src2) -{ - return ((long long) src1) * ((int) src2); -} - -static inline long long _mpy32u(unsigned int src1, unsigned int src2) -{ - return ((long long) ((int) src1)) * ((long long) ((int) src2)); -} - -static inline long long _mpy32us(unsigned int src1, int src2) -{ - return ((int) src1) * ((long long) src2); -} - -static inline int _mvd (int src2) -{ - return src2; -} - - -static inline unsigned int _norm(int src2) -{ - return norm_shift_amt_U_and_non_U(1,src2); -} - - -static inline unsigned int _pack2 (unsigned int src1, unsigned int src2) { - short s1[2], s2[2], r[2]; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r[0] = s2[0]; - r[1] = s1[0]; - result = *(int*)r; - return result; -} - - -static inline int _packh2 (unsigned int src1, unsigned int src2) { - unsigned v0 = src1 & 0xffff0000; - unsigned v1 = src2 >> 16; - unsigned v = v0|v1; - return v; - -} - -static inline unsigned int _packh4 (unsigned int src1, unsigned int src2) { - unsigned v3 = (src1 >> 24) & 0xff; - unsigned v2 = (src1 >> 8) & 0xff; - unsigned v1 = (src2 >> 24) & 0xff; - unsigned v0 = (src2 >> 8) & 0xff; - unsigned v = (v3<<24) | (v2<<16) | (v1 << 8) | v0; - return v; -} - -static inline unsigned int _packhl2 (unsigned int src1, unsigned int src2) { - unsigned v0 = src1 & 
0xffff0000; - unsigned v1 = src2 & 0x0000ffff; - unsigned v = v0|v1; - return v; -} - -static inline unsigned int _packlh2 (unsigned int src1, unsigned int src2) { - unsigned v0 = src1 << 16; - unsigned v1 = (src2 >> 16) & 0xffff; - unsigned v = v0|v1; - return v; -} - - - - -static inline unsigned int _packl4 (unsigned int src1, unsigned int src2) { - unsigned v3 = (src1 >> 16) & 0xff; - unsigned v2 = (src1) & 0xff; - unsigned v1 = (src2 >> 16) & 0xff; - unsigned v0 = (src2) & 0xff; - unsigned v = (v3<<24) | (v2<<16) | (v1 << 8) | v0; - return v; -} - - - - -static inline unsigned int _rpack2 (unsigned int src1, unsigned int src2) { -int s1 = (int) src1; -int s2 = (int) src2; -s1 = util_shift_right_saturate_n (s1, -1, 32); -s2 = util_shift_right_saturate_n (s2, -1, 32); -return (unsigned int) (s1 & 0xffff0000) | (unsigned int) ((s2 & 0xffff0000) >>16); -} - - -static inline unsigned int _rotl (unsigned int src1, unsigned int src2) -{ - src2 &= 0x1f; - return (src1 << src2) | (src1 >> (32 - src2)); -} - - -static inline int _sadd(int src1, int src2) { -signed long long res; -signed long long maxv, minv; -maxv = (1LL << (32-1)) - 1; -minv = (-1LL << (32-1)); -res = (long long) src1 + (long long) src2; -if (res > maxv) { - res = maxv; - _overflow = 1; - } -else if (res < minv ) { - res = minv; - _overflow = 1; - } -return (int) res; -} - -static inline long long _saddsub(unsigned int src1, unsigned int src2) { -int radd; -signed long long rsub; - -signed long long maxv, minv; -maxv = (1LL << (32-1)) - 1; -minv = (-1LL << (32-1)); - -radd = (int) src1 + (int) src2; - -// saturate on subtract, not add - - -rsub = (long long) ((int) src1) - (long long) ((int) src2); -if (rsub > maxv) { - rsub = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (rsub < minv ) { - rsub = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - -return (((unsigned long long) radd) << 32) | ( rsub & 0x00000000ffffffff ) ; -} - - - -static inline long long _saddsub2(unsigned int src1, unsigned int src2) { -signed int radd[2]; -signed int rsub[2]; -signed short s1[2], s2[2]; - -signed int maxv, minv; -maxv = (1L << (16-1)) - 1; -minv = (-1L << (16-1)); - -*((int*)s1) = src1; -*((int*)s2) = src2; - -radd[0] = (int) s1[0] + (int) s2[0]; -radd[1] = (int) s1[1] + (int) s2[1]; - -rsub[0] = (int) s1[0] - (int) s2[0]; -rsub[1] = (int) s1[1] - (int) s2[1]; - -if (radd[0] > maxv) { - radd[0] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (radd[0] < minv ) { - radd[0] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - -if (radd[1] > maxv) { - radd[1] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (radd[1] < minv ) { - radd[1] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - - -if (rsub[0] > maxv) { - rsub[0] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (rsub[0] < minv ) { - rsub[0] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - -if (rsub[1] > maxv) { - rsub[1] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow 
= 1; */ - } -else if (rsub[1] < minv ) { - rsub[1] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - - -return ((((unsigned long long) radd[1]) & 0x000000000000ffff) << 48) | - ((((unsigned long long) radd[0]) & 0x000000000000ffff) << 32) | - ((((unsigned long long) rsub[1]) & 0x000000000000ffff) << 16) | - ((((unsigned long long) rsub[0]) & 0x000000000000ffff)); -} - - - -static inline int _sadd2(int src1, int src2) { -signed short s1[2], s2[2]; -signed int r[2], maxv, minv; - -maxv = (1L << (16-1)) - 1; -minv = (-1L << (16-1)); - - -*((int*)s1) = src1; -*((int*)s2) = src2; - -r[0] = (int) s1[0] + (int) s2[0]; -r[1] = (int) s1[1] + (int) s2[1]; - -if (r[0] > maxv) { - r[0] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (r[0] < minv ) { - r[0] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -if (r[1] > maxv) { - r[1] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (r[1] < minv ) { - r[1] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - -return ((r[1] & 0xffff) << 16 ) | (r[0] & 0xffff) ; -} - - -static inline int _saddus2(unsigned int src1, int src2) { -int res0, res1; - unsigned int s1_0 = (src1 & 0xffff); - unsigned int s1_1 = (src1 & 0xffff0000) >> 16; - - short s2_0 = (src2 & 0xffff); - short s2_1 = (src2 & 0xffff0000) >> 16; - - res0 = s1_0 + s2_0; - res1 = s1_1 + s2_1; - - if (res0 >= 0x10000) - res0 = 0xffff; - else if (res0 < 0) - res0 = 0; - - if (res1 >= 0x10000) - res1 = 0xffff; - else if (res1 < 0) - res1 = 0; - - return (res1 << 16) | res0; -} - - -static inline unsigned int _saddu4(unsigned int src1, unsigned int src2) { -unsigned int res0, res1, res2, res3; - unsigned int s1_0 = (src1 & 0xff); - unsigned int s1_1 = (src1 & 0xff00) >> 8; - unsigned int s1_2 = (src1 & 0xff0000) >> 16; - unsigned int s1_3 = (src1 & 0xff000000) >> 24; - - unsigned int s2_0 = (src2 & 0xff); - unsigned int s2_1 = (src2 & 0xff00) >> 8; - unsigned int s2_2 = (src2 & 0xff0000) >> 16; - unsigned int s2_3 = (src2 & 0xff000000) >> 24; - - res0 = s1_0 + s2_0; - res1 = s1_1 + s2_1; - res2 = s1_2 + s2_2; - res3 = s1_3 + s2_3; - - if (res0 >= 0x100) - res0 = 0xff; - - if (res1 >= 0x100) - res1 = 0xff; - - if (res2 >= 0x100) - res2 = 0xff; - - if (res3 >= 0x100) - res3 = 0xff; - - return (res3 << 24) | (res2 << 16) | (res1 << 8) | res0; - -} - - - -static inline int _sat(C6X_COMPAT_LONG40 src2) -{ - long long maxv = (1LL << (32-1)) - 1; - long long minv = (-1LL << (32-1)); - - int hi = (int)(src2 >> 32); - int lo = (int)src2; - long long temp = (unsigned long long)(unsigned)lo | (unsigned long long)hi << 32; - temp = orig_L40_set(temp); - - if (temp > maxv) { - temp = maxv; - _overflow = 1; - } - else if (temp < minv) { - temp = minv; - _overflow = 1; - } - return (int) temp; -} - -static inline unsigned int _set(unsigned int src2, unsigned int csta, unsigned int cstb) -{ - csta &= 0x1f; - cstb &= 0x1f; - if (csta > cstb) - return src2; - else { - unsigned int mask = (((1 << (cstb - csta)) << 1) - 1) << csta; - return src2 | mask; - } -} - -static inline unsigned int _setr(unsigned int src2, int src1) -{ - unsigned int csta = (src1 >> 5) & 0x1f; - unsigned int cstb = src1 & 0x1f; - if (csta > cstb) - return src2; - else { - unsigned int mask = (((1 << 
(cstb - csta)) << 1) - 1) << csta; - return src2 | mask; - } -} - - -static inline unsigned int _shfl (unsigned int src2) -{ - unsigned short lo = src2; - unsigned short hi = src2 >> 16; - unsigned int result = 0; - int i; - for (i = 0; i < 32; i+= 2) { - result >>= 1; - result |= (lo & 0x1) << 31; - lo >>= 1; - result >>= 1; - result |= (hi & 0x1) << 31; - hi >>= 1; - } - return result; -} - -static inline long long _shfl3 (unsigned int src1, unsigned int src2) -{ - unsigned short lo = src2; - unsigned short hi = src1 >> 16; - unsigned short mid = src1; - unsigned long long result = 0; - int i; - for (i = 0; i < 32; i+= 2) { - result >>= 1; - result |= ((unsigned long long) (lo & 0x1)) << 47; - lo >>= 1; - result >>= 1; - result |= ((unsigned long long) (mid & 0x1)) << 47; - mid >>= 1; - result >>= 1; - result |= ((unsigned long long) (hi & 0x1)) << 47; - hi >>= 1; - } - return result; -} - - - -static inline unsigned int _shlmb (unsigned int src1, unsigned int src2) -{ - return (src2 << 8) | (src1 >> 24); -} - -static inline unsigned int _shrmb (unsigned int src1, unsigned int src2) -{ - return (src2 >> 8) | (src1 << 24); -} - - -static inline unsigned int _shru2 (unsigned int src1, unsigned int src2) { -unsigned short hs1 = src1 >> 16; - unsigned short ls1 = src1 & 0xFFFF; - hs1 >>= src2; - ls1 >>= src2; - return (hs1 << 16) | ls1; -} - - -static inline int _shr2 (int src1, unsigned int src2) { - short s1[2], result[2]; - *((int*)s1) = src1; - src2 = src2 & 31; - result[0] = (int)s1[0] >> src2; - result[1] = (int)s1[1] >> src2; - - return *(int*)result; -} - - -static inline int _smpy (int src1, int src2) { -unsigned long long result; -result = (((short) src1 * (short) src2) << 1); - -if ((result & 0xffffffff) == 0x80000000){ - result = 0x7fffffff; - _overflow = 1; - } -return (int) (result); -} - -static inline int _smpyh (int src1, int src2) { -unsigned long long result; -result = ((short) (src1 >> 16) * (short) (src2 >> 16)) << 1; -if ((result & 0xffffffff) == 0x80000000){ - result = 0x7fffffff; - _overflow = 1; - } -return (int) (result); -} - -static inline int _smpyhl (int src1, int src2) { -unsigned long long result; -result = ((short) (src1 >> 16) * (short) (src2)) << 1; -if ((result & 0xffffffff) == 0x80000000){ - result = 0x7fffffff; - _overflow = 1; - } -return (int) (result); -} - -static inline int _smpylh (int src1, int src2) { -unsigned long long result; -result = ((short) (src1) * (short) (src2 >> 16)) << 1; -if ((result & 0xffffffff) == 0x80000000){ - result = 0x7fffffff; - _overflow = 1; - } -return (int) (result); -} - -static inline long long _smpy2ll (int src1, int src2) { - short ls1 = src1 & 0XFFFF; - short hs1 = src1 >> 16; - short ls2 = src2 & 0XFFFF; - short hs2 = src2 >> 16; - - unsigned long long hi = (hs1 * hs2) << 1; - unsigned long long lo = ((ls1 * ls2) << 1) & 0xFFFFFFFF; - if ((hi & 0xffffffff) == 0x80000000){ - hi = 0x7fffffff; - _overflow = 1; - } - - if ((lo & 0xffffffff) == 0x80000000){ - lo = 0x7fffffff; - _overflow = 1; - } - - return (hi << 32) | lo; -} - - - - -static inline int _smpy32(int src1, int src2) -{ - long long res = (long long) src1 * src2; - res <<= 1; - res >>= 32; - return res; -} - -static inline unsigned char TEN_satu8 (short src) -{ - if (src > 0xff) - return 0xff; - else if (src < 0) - return 0; - else - return src; -} - -static inline int _spack2 (int src1, int src2) { -short s1 = (short) util_saturate_n_no_state(src1,16); -short s2 = (short) util_saturate_n_no_state(src2,16); -return ( (unsigned int) s1 << 16) | (((int) s2) & 
0xFFFF); -} - - -static inline unsigned int _spacku4 (int src1, int src2) { - short lolo = src2; - short lohi = src2 >> 16; - short hilo = src1; - short hihi = src1 >> 16; - - lolo = TEN_satu8(lolo); - lohi = TEN_satu8(lohi); - hilo = TEN_satu8(hilo); - hihi = TEN_satu8(hihi); - - return (((unsigned int) hihi) << 24) | (((unsigned int) hilo) << 16) | (lohi << 8) | lolo; -} - - - -static inline int _sshl (int src1, unsigned int src2) { -short local2 = (short)(src2 & 0x7FFF); -return (int) util_shift_right_saturate_n(src1, -local2, 32); -} - - - - -static inline int _sshvl (int src2, int src1) { - short s1; - if (src1 > 31) - s1 = 31; - else if (src1 < -31) - s1 = -31; - else - s1 = src1; - - return (int) util_shift_right_saturate_n(src2, -s1, 32); -} - - - - - -static inline int _sshvr (int src2, int src1) { -short s1; - if (src1 > 31) - s1 = 31; - else if (src1 < -31) - s1 = -31; - else - s1 = src1; - return (int) util_shift_right_saturate_n(src2, s1, 32); -} - - - - -static inline int _ssub(int src1, int src2) { -signed long long res; -signed long long maxv, minv; -maxv = (1LL << (32-1)) - 1; -minv = (-1LL << (32-1)); -res = (long long) src1 - (long long) src2; -if (res > maxv) { - res = maxv; - _overflow = 1; - } -else if (res < minv ) { - res = minv; - _overflow = 1; - } -return (int) res; -} - -static inline int _ssub2(int src1, int src2) { -signed short s1[2], s2[2]; -signed int r[2], maxv, minv; - -maxv = (1L << (16-1)) - 1; -minv = (-1L << (16-1)); - - -*((int*)s1) = src1; -*((int*)s2) = src2; - -r[0] = (int) s1[0] - (int) s2[0]; -r[1] = (int) s1[1] - (int) s2[1]; - -if (r[0] > maxv) { - r[0] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (r[0] < minv ) { - r[0] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -if (r[1] > maxv) { - r[1] = maxv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } -else if (r[1] < minv ) { - r[1] = minv; - /* NOTE: TI c6x does NOT set the overflow register even if results saturate */ - /* _overflow = 1; */ - } - -return ((r[1] & 0xffff) << 16 ) | (r[0] & 0xffff) ; -} - - -static inline int _subabs4 (int src1, int src2) { - int res0, res1, res2, res3; - unsigned int s1_0 = (src1 & 0xff); - unsigned int s1_1 = (src1 & 0xff00) >> 8; - unsigned int s1_2 = (src1 & 0xff0000) >> 16; - unsigned int s1_3 = (src1 & 0xff000000) >> 24; - - unsigned int s2_0 = (src2 & 0xff); - unsigned int s2_1 = (src2 & 0xff00) >> 8; - unsigned int s2_2 = (src2 & 0xff0000) >> 16; - unsigned int s2_3 = (src2 & 0xff000000) >> 24; - - res0 = s1_0 - s2_0; - res1 = s1_1 - s2_1; - res2 = s1_2 - s2_2; - res3 = s1_3 - s2_3; - - if (res0 < 0) - res0 = -res0; - - if (res1 < 0) - res1 = -res1; - - if (res2 < 0) - res2 = -res2; - - if (res3 < 0) - res3 = -res3; - - return (res3 << 24) | (res2 << 16) | (res1 << 8) | res0; -} - - -static inline unsigned int _subc (unsigned int src1, unsigned int src2) -{ - if ( src1 >= src2) - return ((src1 - src2) << 1) + 1; - else - return src1 << 1; -} - - - -static inline int _sub2(int src1, int src2) { - short s1[2], s2[2], r[2]; - int result; - *((int*)s1) = src1; - *((int*)s2) = src2; - r[0] = s1[0] - s2[0]; - r[1] = s1[1] - s2[1]; - result = *(int*)r; - return result; -} - - -static inline int _sub4(int src1, int src2) { - char c1[4], c2[4], r[4]; - int result; - *((int*)c1) = src1; - *((int*)c2) = src2; - r[0] = c1[0] - c2[0]; - r[1] = c1[1] - c2[1]; - 
r[2] = c1[2] - c2[2]; - r[3] = c1[3] - c2[3]; - result = *(int*)r; - return result; -} - - -static inline int _swap4 (unsigned int src1) { - unsigned char v0 = src1; - unsigned char v1 = src1 >> 8; - unsigned char v2 = src1 >> 16; - unsigned char v3 = src1 >> 24; - unsigned v = v0<<8 | v1 | v2<<24 | v3<<16; - return v; -} - -static inline unsigned int _unpkhu4 (unsigned int src1) { - unsigned v0 = src1>>24; - unsigned v1 = (src1>>16) & 0xff; - return (v0<<16) | v1; -} - -static inline unsigned int _unpklu4 (unsigned int src1) { - unsigned v1 = (src1>>8) & 0xff; - unsigned v0 = (src1) & 0xff; - return (v1<<16) | v0; -} - - - - -static inline unsigned int _xpnd2 (unsigned int src1) { - int v0 = (src1 & 0x1) ? 0x0000ffff : 0x00000000; - int v1 = (src1 & 0x2) ? 0xffff0000 : 0x00000000; - return v0|v1; -} - -static inline unsigned int _xpnd4 (unsigned int src1) { - int v0 = (src1 & 0x1) ? 0x000000ff : 0x00000000; - int v1 = (src1 & 0x2) ? 0x0000ff00 : 0x00000000; - int v2 = (src1 & 0x4) ? 0x00ff0000 : 0x00000000; - int v3 = (src1 & 0x8) ? 0xff000000 : 0x00000000; - int r = v0|v1|v2|v3; - return r; -} - - - -// end of Implemented in alphabetical order - - -#endif /* __C6X_COMPAT__H */ diff --git a/src/arch/xtensa/include/xtensa/cacheasm.h b/src/arch/xtensa/include/xtensa/cacheasm.h deleted file mode 100644 index 9cb2c8fcc6b8..000000000000 --- a/src/arch/xtensa/include/xtensa/cacheasm.h +++ /dev/null @@ -1,962 +0,0 @@ -/* - * xtensa/cacheasm.h -- assembler-specific cache related definitions - * that depend on CORE configuration - * - * This file is logically part of xtensa/coreasm.h , - * but is kept separate for modularity / compilation-performance. - */ - -/* - * Copyright (c) 2001-2014 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTENSA_CACHEASM_H -#define XTENSA_CACHEASM_H - -#include -#include -#include -#include - -/* - * This header file defines assembler macros of the form: - * cache_ - * where is 'i' or 'd' for instruction and data caches, - * and indicates the function of the macro. - * - * The following functions are defined, - * and apply only to the specified cache (I or D): - * - * reset - * Resets the cache. - * - * sync - * Makes sure any previous cache instructions have been completed; - * ie. makes sure any previous cache control operations - * have had full effect and been synchronized to memory. - * Eg. 
any invalidate completed [so as not to generate a hit], - * any writebacks or other pipelined writes written to memory, etc. - * - * invalidate_line (single cache line) - * invalidate_region (specified memory range) - * invalidate_all (entire cache) - * Invalidates all cache entries that cache - * data from the specified memory range. - * NOTE: locked entries are not invalidated. - * - * writeback_line (single cache line) - * writeback_region (specified memory range) - * writeback_all (entire cache) - * Writes back to memory all dirty cache entries - * that cache data from the specified memory range, - * and marks these entries as clean. - * NOTE: on some future implementations, this might - * also invalidate. - * NOTE: locked entries are written back, but never invalidated. - * NOTE: instruction caches never implement writeback. - * - * writeback_inv_line (single cache line) - * writeback_inv_region (specified memory range) - * writeback_inv_all (entire cache) - * Writes back to memory all dirty cache entries - * that cache data from the specified memory range, - * and invalidates these entries (including all clean - * cache entries that cache data from that range). - * NOTE: locked entries are written back but not invalidated. - * NOTE: instruction caches never implement writeback. - * - * lock_line (single cache line) - * lock_region (specified memory range) - * Prefetch and lock the specified memory range into cache. - * NOTE: if any part of the specified memory range cannot - * be locked, a Load/Store Error (for dcache) or Instruction - * Fetch Error (for icache) exception occurs. These macros don't - * do anything special (yet anyway) to handle this situation. - * - * unlock_line (single cache line) - * unlock_region (specified memory range) - * unlock_all (entire cache) - * Unlock cache entries that cache the specified memory range. - * Entries not already locked are unaffected. - * - * coherence_on - * coherence_off - * Turn off and on cache coherence - * - */ - - - -/*************************** GENERIC -- ALL CACHES ***************************/ - - -/* - * The following macros assume the following cache size/parameter limits - * in the current Xtensa core implementation: - * cache size: 1024 bytes minimum - * line size: 16 - 64 bytes - * way count: 1 - 4 - * - * Minimum entries per way (ie. per associativity) = 1024 / 64 / 4 = 4 - * Hence the assumption that each loop can execute four cache instructions. - * - * Correspondingly, the offset range of instructions is assumed able to cover - * four lines, ie. offsets {0,1,2,3} * line_size are assumed valid for - * both hit and indexed cache instructions. Ie. these offsets are all - * valid: 0, 16, 32, 48, 64, 96, 128, 192 (for line sizes 16, 32, 64). - * This is true of all original cache instructions - * (dhi, ihi, dhwb, dhwbi, dii, iii) which have offsets - * of 0 to 1020 in multiples of 4 (ie. 8 bits shifted by 2). - * This is also true of subsequent cache instructions - * (dhu, ihu, diu, iiu, diwb, diwbi, dpfl, ipfl) which have offsets - * of 0 to 240 in multiples of 16 (ie. 4 bits shifted by 4). - * - * (Maximum cache size, currently 32k, doesn't affect the following macros. - * Cache ways > MMU min page size cause aliasing but that's another matter.) - */ - - - -/* - * Macro to apply an 'indexed' cache instruction to the entire cache. - * - * Parameters: - * cainst instruction/ that takes an address register parameter - * and an offset parameter (in range 0 .. 3*linesize). 
- * size size of cache in bytes - * linesize size of cache line in bytes (always power-of-2) - * assoc_or1 number of associativities (ways/sets) in cache - * if all sets affected by cainst, - * or 1 if only one set (or not all sets) of the cache - * is affected by cainst (eg. DIWB or DIWBI [not yet ISA defined]). - * aa, ab unique address registers (temporaries). - * awb set to other than a0 if wb type of instruction - * loopokay 1 allows use of zero-overhead loops, 0 does not - * immrange range (max value) of cainst's immediate offset parameter, in bytes - * (NOTE: macro assumes immrange allows power-of-2 number of lines) - */ - - .macro cache_index_all cainst, size, linesize, assoc_or1, aa, ab, loopokay, maxofs, awb=a0 - - // Number of indices in cache (lines per way): - .set .Lindices, (\size / (\linesize * \assoc_or1)) - // Number of indices processed per loop iteration (max 4): - .set .Lperloop, .Lindices - .ifgt .Lperloop - 4 - .set .Lperloop, 4 - .endif - // Also limit instructions per loop if cache line size exceeds immediate range: - .set .Lmaxperloop, (\maxofs / \linesize) + 1 - .ifgt .Lperloop - .Lmaxperloop - .set .Lperloop, .Lmaxperloop - .endif - // Avoid addi of 128 which takes two instructions (addmi,addi): - .ifeq .Lperloop*\linesize - 128 - .ifgt .Lperloop - 1 - .set .Lperloop, .Lperloop / 2 - .endif - .endif - - // \size byte cache, \linesize byte lines, \assoc_or1 way(s) affected by each \cainst. - // XCHAL_ERRATUM_497 - don't execute using loop, to reduce the amount of added code - .ifne (\loopokay & XCHAL_HAVE_LOOPS && !XCHAL_ERRATUM_497) - - movi \aa, .Lindices / .Lperloop // number of loop iterations - // Possible improvement: need only loop if \aa > 1 ; - // however \aa == 1 is highly unlikely. - movi \ab, 0 // to iterate over cache - loop \aa, .Lend_cachex\@ - .set .Li, 0 ; .rept .Lperloop - \cainst \ab, .Li*\linesize - .set .Li, .Li+1 ; .endr - addi \ab, \ab, .Lperloop*\linesize // move to next line -.Lend_cachex\@: - - .else - - movi \aa, (\size / \assoc_or1) - // Possible improvement: need only loop if \aa > 1 ; - // however \aa == 1 is highly unlikely. - movi \ab, 0 // to iterate over cache - .ifne ((\awb !=a0) & XCHAL_ERRATUM_497) // don't use awb if set to a0 - movi \awb, 0 - .endif -.Lstart_cachex\@: - .set .Li, 0 ; .rept .Lperloop - \cainst \ab, .Li*\linesize - .set .Li, .Li+1 ; .endr - .ifne ((\awb !=a0) & XCHAL_ERRATUM_497) // do memw after 8 cainst wb instructions - addi \awb, \awb, .Lperloop - blti \awb, 8, .Lstart_memw\@ - memw - movi \awb, 0 -.Lstart_memw\@: - .endif - addi \ab, \ab, .Lperloop*\linesize // move to next line - bltu \ab, \aa, .Lstart_cachex\@ - .endif - - .endm - - -/* - * Macro to apply a 'hit' cache instruction to a memory region, - * ie. to any cache entries that cache a specified portion (region) of memory. - * Takes care of the unaligned cases, ie. may apply to one - * more cache line than $asize / lineSize if $aaddr is not aligned. - * - * - * Parameters are: - * cainst instruction/macro that takes an address register parameter - * and an offset parameter (currently always zero) - * and generates a cache instruction (eg. "dhi", "dhwb", "ihi", etc.) - * linesize_log2 log2(size of cache line in bytes) - * addr register containing start address of region (clobbered) - * asize register containing size of the region in bytes (clobbered) - * askew unique register used as temporary - * awb unique register used as temporary for erratum 497. 
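For illustration (not part of the original header): the macro body further down folds the start address's offset within its cache line into the byte count, rounds the sum up to whole lines, and shifts right by linesize_log2, which is why an unaligned region can touch one line more than asize / linesize. A small C sketch of that arithmetic, with lines_touched as a hypothetical helper name used only here:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Number of cache lines covered by a byte region, mirroring the
 * extui/add/addi/srli sequence in cache_hit_region; linesize_log2 is
 * log2 of the (power-of-two) cache line size. */
static unsigned lines_touched(uintptr_t addr, size_t size, unsigned linesize_log2)
{
	size_t linesize = (size_t)1 << linesize_log2;
	size_t skew = addr & (linesize - 1);	/* unalignment of the start address */

	return (unsigned)((skew + size + linesize - 1) >> linesize_log2);
}

int main(void)
{
	/* 64-byte lines: an aligned 64-byte region fits in one line, but the
	 * same size starting 4 bytes into a line straddles two lines. */
	assert(lines_touched(0x1000, 64, 6) == 1);
	assert(lines_touched(0x1004, 64, 6) == 2);
	return 0;
}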
- * - * Note: A possible optimization to this macro is to apply the operation - * to the entire cache if the region exceeds the size of the cache - * by some empirically determined amount or factor. Some experimentation - * is required to determine the appropriate factors, which also need - * to be tunable if required. - */ - - .macro cache_hit_region cainst, linesize_log2, addr, asize, askew, awb=a0 - - // Make \asize the number of iterations: - extui \askew, \addr, 0, \linesize_log2 // get unalignment amount of \addr - add \asize, \asize, \askew // ... and add it to \asize - addi \asize, \asize, (1 << \linesize_log2) - 1 // round up! - srli \asize, \asize, \linesize_log2 - - // Iterate over region: - .ifne ((\awb !=a0) & XCHAL_ERRATUM_497) // don't use awb if set to a0 - movi \awb, 0 - .endif - floopnez \asize, cacheh\@ - \cainst \addr, 0 - .ifne ((\awb !=a0) & XCHAL_ERRATUM_497) // do memw after 8 cainst wb instructions - addi \awb, \awb, 1 - blti \awb, 8, .Lstart_memw\@ - memw - movi \awb, 0 -.Lstart_memw\@: - .endif - addi \addr, \addr, (1 << \linesize_log2) // move to next line - floopend \asize, cacheh\@ - .endm - - - - - -/*************************** INSTRUCTION CACHE ***************************/ - - -/* - * Reset/initialize the instruction cache by simply invalidating it: - * (need to unlock first also, if cache locking implemented): - * - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro icache_reset aa, ab, loopokay=0 - icache_unlock_all \aa, \ab, \loopokay - icache_invalidate_all \aa, \ab, \loopokay - .endm - - -/* - * Synchronize after an instruction cache operation, - * to be sure everything is in sync with memory as to be - * expected following any previous instruction cache control operations. - * - * Even if a config doesn't have caches, an isync is still needed - * when instructions in any memory are modified, whether by a loader - * or self-modifying code. Therefore, this macro always produces - * an isync, whether or not an icache is present. - * - * Parameters are: - * ar an address register (temporary) (currently unused, but may be used in future) - */ - .macro icache_sync ar - isync - .endm - - - -/* - * Invalidate a single line of the instruction cache. - * Parameters are: - * ar address register that contains (virtual) address to invalidate - * (may get clobbered in a future implementation, but not currently) - * offset (optional) offset to add to \ar to compute effective address to invalidate - * (note: some number of lsbits are ignored) - */ - .macro icache_invalidate_line ar, offset -#if XCHAL_ICACHE_SIZE > 0 - ihi \ar, \offset // invalidate icache line - icache_sync \ar -#endif - .endm - - - - -/* - * Invalidate instruction cache entries that cache a specified portion of memory. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro icache_invalidate_region astart, asize, ac -#if XCHAL_ICACHE_SIZE > 0 - // Instruction cache region invalidation: - cache_hit_region ihi, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac - icache_sync \ac - // End of instruction cache region invalidation -#endif - .endm - - - -/* - * Invalidate entire instruction cache. 
- * - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro icache_invalidate_all aa, ab, loopokay=1 -#if XCHAL_ICACHE_SIZE > 0 - // Instruction cache invalidation: - cache_index_all iii, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, XCHAL_ICACHE_WAYS, \aa, \ab, \loopokay, 1020 - icache_sync \aa - // End of instruction cache invalidation -#endif - .endm - - - -/* - * Lock (prefetch & lock) a single line of the instruction cache. - * - * Parameters are: - * ar address register that contains (virtual) address to lock - * (may get clobbered in a future implementation, but not currently) - * offset offset to add to \ar to compute effective address to lock - * (note: some number of lsbits are ignored) - */ - .macro icache_lock_line ar, offset -#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE - ipfl \ar, \offset /* prefetch and lock icache line */ - icache_sync \ar -#endif - .endm - - - -/* - * Lock (prefetch & lock) a specified portion of memory into the instruction cache. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro icache_lock_region astart, asize, ac -#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE - // Instruction cache region lock: - cache_hit_region ipfl, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac - icache_sync \ac - // End of instruction cache region lock -#endif - .endm - - - -/* - * Unlock a single line of the instruction cache. - * - * Parameters are: - * ar address register that contains (virtual) address to unlock - * (may get clobbered in a future implementation, but not currently) - * offset offset to add to \ar to compute effective address to unlock - * (note: some number of lsbits are ignored) - */ - .macro icache_unlock_line ar, offset -#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE - ihu \ar, \offset /* unlock icache line */ - icache_sync \ar -#endif - .endm - - - -/* - * Unlock a specified portion of memory from the instruction cache. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro icache_unlock_region astart, asize, ac -#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE - // Instruction cache region unlock: - cache_hit_region ihu, XCHAL_ICACHE_LINEWIDTH, \astart, \asize, \ac - icache_sync \ac - // End of instruction cache region unlock -#endif - .endm - - - -/* - * Unlock entire instruction cache. 
- * - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro icache_unlock_all aa, ab, loopokay=1 -#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE - // Instruction cache unlock: - cache_index_all iiu, XCHAL_ICACHE_SIZE, XCHAL_ICACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240 - icache_sync \aa - // End of instruction cache unlock -#endif - .endm - - - - - -/*************************** DATA CACHE ***************************/ - - - -/* - * Reset/initialize the data cache by simply invalidating it - * (need to unlock first also, if cache locking implemented): - * - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro dcache_reset aa, ab, loopokay=0 - dcache_unlock_all \aa, \ab, \loopokay - dcache_invalidate_all \aa, \ab, \loopokay - .endm - - - - -/* - * Synchronize after a data cache operation, - * to be sure everything is in sync with memory as to be - * expected following any previous data cache control operations. - * - * Parameters are: - * ar an address register (temporary) (currently unused, but may be used in future) - */ - .macro dcache_sync ar, wbtype=0 -#if XCHAL_DCACHE_SIZE > 0 - // No synchronization is needed. - // (memw may be desired e.g. after writeback operation to help ensure subsequent - // external accesses are seen to follow that writeback, however that's outside - // the scope of this macro) - - //dsync - .ifne (\wbtype & XCHAL_ERRATUM_497) - memw - .endif -#endif - .endm - - - -/* - * Turn on cache coherence. - * - * WARNING: for RE-201x.x and later hardware, any interrupt that tries - * to change MEMCTL will see its changes dropped if the interrupt comes - * in the middle of this routine. If this might be an issue, call this - * routine with interrupts disabled. - * - * Parameters are: - * ar,at two scratch address registers (both clobbered) - */ - .macro cache_coherence_on ar at -#if XCHAL_DCACHE_IS_COHERENT -# if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0 - /* Have MEMCTL. Enable snoop responses. */ - rsr.memctl \ar - movi \at, MEMCTL_SNOOP_EN - or \ar, \ar, \at - wsr.memctl \ar -# elif XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX - /* Opt into coherence for MX (for backward compatibility / testing). */ - movi \ar, 1 - movi \at, XER_CCON - wer \ar, \at - extw -# endif -#endif - .endm - - - -/* - * Turn off cache coherence. - * - * NOTE: this is generally preceded by emptying the cache; - * see xthal_cache_coherence_optout() in hal/coherence.c for details. - * - * WARNING: for RE-201x.x and later hardware, any interrupt that tries - * to change MEMCTL will see its changes dropped if the interrupt comes - * in the middle of this routine. If this might be an issue, call this - * routine with interrupts disabled. - * - * Parameters are: - * ar,at two scratch address registers (both clobbered) - */ - .macro cache_coherence_off ar at -#if XCHAL_DCACHE_IS_COHERENT -# if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0 - /* Have MEMCTL. Disable snoop responses. */ - rsr.memctl \ar - movi \at, ~MEMCTL_SNOOP_EN - and \ar, \ar, \at - wsr.memctl \ar -# elif XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX - /* Opt out of coherence, for MX (for backward compatibility / testing). */ - extw - movi \at, 0 - movi \ar, XER_CCON - wer \at, \ar - extw -# endif -#endif - .endm - - - -/* - * Synchronize after a data store operation, - * to be sure the stored data is completely off the processor - * (and assuming there is no buffering outside the processor, - * that the data is in memory). 
This may be required to - * ensure that the processor's write buffers are emptied. - * A MEMW followed by a read guarantees this, by definition. - * We also try to make sure the read itself completes. - * - * Parameters are: - * ar an address register (temporary) - */ - .macro write_sync ar - memw // ensure previous memory accesses are complete prior to subsequent memory accesses - l32i \ar, sp, 0 // completing this read ensures any previous write has completed, because of MEMW - //slot - add \ar, \ar, \ar // use the result of the read to help ensure the read completes (in future architectures) - .endm - - -/* - * Invalidate a single line of the data cache. - * Parameters are: - * ar address register that contains (virtual) address to invalidate - * (may get clobbered in a future implementation, but not currently) - * offset (optional) offset to add to \ar to compute effective address to invalidate - * (note: some number of lsbits are ignored) - */ - .macro dcache_invalidate_line ar, offset -#if XCHAL_DCACHE_SIZE > 0 - dhi \ar, \offset - dcache_sync \ar -#endif - .endm - - - - - -/* - * Invalidate data cache entries that cache a specified portion of memory. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro dcache_invalidate_region astart, asize, ac -#if XCHAL_DCACHE_SIZE > 0 - // Data cache region invalidation: - cache_hit_region dhi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac - dcache_sync \ac - // End of data cache region invalidation -#endif - .endm - - - -/* - * Invalidate entire data cache. - * - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro dcache_invalidate_all aa, ab, loopokay=1 -#if XCHAL_DCACHE_SIZE > 0 - // Data cache invalidation: - cache_index_all dii, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, XCHAL_DCACHE_WAYS, \aa, \ab, \loopokay, 1020 - dcache_sync \aa - // End of data cache invalidation -#endif - .endm - - - -/* - * Writeback a single line of the data cache. - * Parameters are: - * ar address register that contains (virtual) address to writeback - * (may get clobbered in a future implementation, but not currently) - * offset offset to add to \ar to compute effective address to writeback - * (note: some number of lsbits are ignored) - */ - .macro dcache_writeback_line ar, offset -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK - dhwb \ar, \offset - dcache_sync \ar, wbtype=1 -#endif - .endm - - - -/* - * Writeback dirty data cache entries that cache a specified portion of memory. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro dcache_writeback_region astart, asize, ac, awb -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK - // Data cache region writeback: - cache_hit_region dhwb, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac, \awb - dcache_sync \ac, wbtype=1 - // End of data cache region writeback -#endif - .endm - - - -/* - * Writeback entire data cache. 
- * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro dcache_writeback_all aa, ab, awb, loopokay=1 -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK - // Data cache writeback: - cache_index_all diwb, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240, \awb, - dcache_sync \aa, wbtype=1 - // End of data cache writeback -#endif - .endm - - - -/* - * Writeback and invalidate a single line of the data cache. - * Parameters are: - * ar address register that contains (virtual) address to writeback and invalidate - * (may get clobbered in a future implementation, but not currently) - * offset offset to add to \ar to compute effective address to writeback and invalidate - * (note: some number of lsbits are ignored) - */ - .macro dcache_writeback_inv_line ar, offset -#if XCHAL_DCACHE_SIZE > 0 - dhwbi \ar, \offset /* writeback and invalidate dcache line */ - dcache_sync \ar, wbtype=1 -#endif - .endm - - - -/* - * Writeback and invalidate data cache entries that cache a specified portion of memory. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro dcache_writeback_inv_region astart, asize, ac, awb -#if XCHAL_DCACHE_SIZE > 0 - // Data cache region writeback and invalidate: - cache_hit_region dhwbi, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac, \awb - dcache_sync \ac, wbtype=1 - // End of data cache region writeback and invalidate -#endif - .endm - - - -/* - * Writeback and invalidate entire data cache. - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro dcache_writeback_inv_all aa, ab, awb, loopokay=1 -#if XCHAL_DCACHE_SIZE > 0 - // Data cache writeback and invalidate: -#if XCHAL_DCACHE_IS_WRITEBACK - cache_index_all diwbi, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240, \awb - dcache_sync \aa, wbtype=1 -#else /*writeback*/ - // Data cache does not support writeback, so just invalidate: */ - dcache_invalidate_all \aa, \ab, \loopokay -#endif /*writeback*/ - // End of data cache writeback and invalidate -#endif - .endm - - - - -/* - * Lock (prefetch & lock) a single line of the data cache. - * - * Parameters are: - * ar address register that contains (virtual) address to lock - * (may get clobbered in a future implementation, but not currently) - * offset offset to add to \ar to compute effective address to lock - * (note: some number of lsbits are ignored) - */ - .macro dcache_lock_line ar, offset -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE - dpfl \ar, \offset /* prefetch and lock dcache line */ - dcache_sync \ar -#endif - .endm - - - -/* - * Lock (prefetch & lock) a specified portion of memory into the data cache. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro dcache_lock_region astart, asize, ac -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE - // Data cache region lock: - cache_hit_region dpfl, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac - dcache_sync \ac - // End of data cache region lock -#endif - .endm - - - -/* - * Unlock a single line of the data cache. 
- * - * Parameters are: - * ar address register that contains (virtual) address to unlock - * (may get clobbered in a future implementation, but not currently) - * offset offset to add to \ar to compute effective address to unlock - * (note: some number of lsbits are ignored) - */ - .macro dcache_unlock_line ar, offset -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE - dhu \ar, \offset /* unlock dcache line */ - dcache_sync \ar -#endif - .endm - - - -/* - * Unlock a specified portion of memory from the data cache. - * Parameters are: - * astart start address (register gets clobbered) - * asize size of the region in bytes (register gets clobbered) - * ac unique register used as temporary - */ - .macro dcache_unlock_region astart, asize, ac -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE - // Data cache region unlock: - cache_hit_region dhu, XCHAL_DCACHE_LINEWIDTH, \astart, \asize, \ac - dcache_sync \ac - // End of data cache region unlock -#endif - .endm - - - -/* - * Unlock entire data cache. - * - * Parameters: - * aa, ab unique address registers (temporaries) - */ - .macro dcache_unlock_all aa, ab, loopokay=1 -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE - // Data cache unlock: - cache_index_all diu, XCHAL_DCACHE_SIZE, XCHAL_DCACHE_LINESIZE, 1, \aa, \ab, \loopokay, 240 - dcache_sync \aa - // End of data cache unlock -#endif - .endm - - - -/* - * Get the number of enabled icache ways. Note that this may - * be different from the value read from the MEMCTL register. - * - * Parameters: - * aa address register where value is returned - */ - .macro icache_get_ways aa -#if XCHAL_ICACHE_SIZE > 0 -#if XCHAL_HAVE_ICACHE_DYN_WAYS - // Read from MEMCTL and shift/mask - rsr.memctl \aa - extui \aa, \aa, MEMCTL_ICWU_SHIFT, MEMCTL_ICWU_BITS - blti \aa, XCHAL_ICACHE_WAYS, .Licgw - movi \aa, XCHAL_ICACHE_WAYS -.Licgw: -#else - // All ways are always enabled - movi \aa, XCHAL_ICACHE_WAYS -#endif -#else - // No icache - movi \aa, 0 -#endif - .endm - - - -/* - * Set the number of enabled icache ways. - * - * Parameters: - * aa address register specifying number of ways (trashed) - * ab,ac address register for scratch use (trashed) - */ - .macro icache_set_ways aa, ab, ac -#if XCHAL_ICACHE_SIZE > 0 -#if XCHAL_HAVE_ICACHE_DYN_WAYS - movi \ac, MEMCTL_ICWU_CLR_MASK // set up to clear bits 18-22 - rsr.memctl \ab - and \ab, \ab, \ac - movi \ac, MEMCTL_INV_EN // set bit 23 - slli \aa, \aa, MEMCTL_ICWU_SHIFT // move to right spot - or \ab, \ab, \aa - or \ab, \ab, \ac - wsr.memctl \ab - isync -#else - // All ways are always enabled -#endif -#else - // No icache -#endif - .endm - - - -/* - * Get the number of enabled dcache ways. Note that this may - * be different from the value read from the MEMCTL register. - * - * Parameters: - * aa address register where value is returned - */ - .macro dcache_get_ways aa -#if XCHAL_DCACHE_SIZE > 0 -#if XCHAL_HAVE_DCACHE_DYN_WAYS - // Read from MEMCTL and shift/mask - rsr.memctl \aa - extui \aa, \aa, MEMCTL_DCWU_SHIFT, MEMCTL_DCWU_BITS - blti \aa, XCHAL_DCACHE_WAYS, .Ldcgw - movi \aa, XCHAL_DCACHE_WAYS -.Ldcgw: -#else - // All ways are always enabled - movi \aa, XCHAL_DCACHE_WAYS -#endif -#else - // No dcache - movi \aa, 0 -#endif - .endm - - - -/* - * Set the number of enabled dcache ways. 
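For illustration (not part of the original header): icache_get_ways above reads the "icache ways in use" field out of MEMCTL and clamps it to the configured way count, which is why its result can differ from the raw register value. A small C sketch under stated assumptions: the ICWU field sits in bits 18-22, as the "clear bits 18-22" comment in icache_set_ways above indicates, and XCHAL_ICACHE_WAYS is given an arbitrary example value of 4.

#include <assert.h>
#include <stdint.h>

/* Assumed field position, taken from the "clear bits 18-22" comment in
 * icache_set_ways above. */
#define MEMCTL_ICWU_SHIFT 18
#define MEMCTL_ICWU_BITS  5

/* Example value only; the real count comes from the core configuration. */
#define XCHAL_ICACHE_WAYS 4

/* C equivalent of the rsr/extui/blti sequence in icache_get_ways:
 * extract the field and clamp it to the configured number of ways. */
static unsigned icache_ways_enabled(uint32_t memctl)
{
	unsigned ways = (memctl >> MEMCTL_ICWU_SHIFT) & ((1u << MEMCTL_ICWU_BITS) - 1);

	return ways < XCHAL_ICACHE_WAYS ? ways : XCHAL_ICACHE_WAYS;
}

int main(void)
{
	assert(icache_ways_enabled(2u << MEMCTL_ICWU_SHIFT) == 2);
	/* A raw field value larger than the configured way count is clamped. */
	assert(icache_ways_enabled(31u << MEMCTL_ICWU_SHIFT) == XCHAL_ICACHE_WAYS);
	return 0;
}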
- * - * Parameters: - * aa address register specifying number of ways (trashed) - * ab,ac address register for scratch use (trashed) - */ - .macro dcache_set_ways aa, ab, ac -#if (XCHAL_DCACHE_SIZE > 0) && XCHAL_HAVE_DCACHE_DYN_WAYS - movi \ac, MEMCTL_DCWA_CLR_MASK // set up to clear bits 13-17 - rsr.memctl \ab - and \ab, \ab, \ac // clear ways allocatable - slli \ac, \aa, MEMCTL_DCWA_SHIFT - or \ab, \ab, \ac // set ways allocatable - wsr.memctl \ab -#if XCHAL_DCACHE_IS_WRITEBACK - // Check if the way count is increasing or decreasing - extui \ac, \ab, MEMCTL_DCWU_SHIFT, MEMCTL_DCWU_BITS // bits 8-12 - ways in use - bge \aa, \ac, .Ldsw3 // equal or increasing - slli \ab, \aa, XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH // start way number - slli \ac, \ac, XCHAL_DCACHE_LINEWIDTH + XCHAL_DCACHE_SETWIDTH // end way number -.Ldsw1: - diwbui.p \ab // auto-increments ab - bge \ab, \ac, .Ldsw2 - beqz \ab, .Ldsw2 - j .Ldsw1 -.Ldsw2: - rsr.memctl \ab -#endif -.Ldsw3: - // No dirty data to write back, just set the new number of ways - movi \ac, MEMCTL_DCWU_CLR_MASK // set up to clear bits 8-12 - and \ab, \ab, \ac // clear ways in use - movi \ac, MEMCTL_INV_EN - or \ab, \ab, \ac // set bit 23 - slli \aa, \aa, MEMCTL_DCWU_SHIFT - or \ab, \ab, \aa // set ways in use - wsr.memctl \ab -#else - // No dcache or no way disable support -#endif - .endm - -#endif /*XTENSA_CACHEASM_H*/ - diff --git a/src/arch/xtensa/include/xtensa/cacheattrasm.h b/src/arch/xtensa/include/xtensa/cacheattrasm.h deleted file mode 100644 index 211803aedbf3..000000000000 --- a/src/arch/xtensa/include/xtensa/cacheattrasm.h +++ /dev/null @@ -1,436 +0,0 @@ -/* - * xtensa/cacheattrasm.h -- assembler-specific CACHEATTR register related definitions - * that depend on CORE configuration - * - * This file is logically part of xtensa/coreasm.h (or perhaps xtensa/cacheasm.h), - * but is kept separate for modularity / compilation-performance. - */ - -/* - * Copyright (c) 2001-2009 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef XTENSA_CACHEATTRASM_H -#define XTENSA_CACHEATTRASM_H - -#include - -/* Determine whether cache attributes are controlled using eight 512MB entries: */ -#define XCHAL_CA_8X512 (XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \ - || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)) - - -/* - * This header file defines assembler macros of the form: - * cacheattr_ - * where: - * is 'i', 'd' or absent for instruction, data - * or both caches; and - * indicates the function of the macro. - * - * The following functions are defined: - * - * icacheattr_get - * Reads I-cache CACHEATTR into a2 (clobbers a3-a5). - * - * dcacheattr_get - * Reads D-cache CACHEATTR into a2 (clobbers a3-a5). - * (Note: for configs with a real CACHEATTR register, the - * above two macros are identical.) - * - * cacheattr_set - * Writes both I-cache and D-cache CACHEATTRs from a2 (a3-a8 clobbered). - * Works even when changing one's own code's attributes. - * - * icacheattr_is_enabled label - * Branches to \label if I-cache appears to have been enabled - * (eg. if CACHEATTR contains a cache-enabled attribute). - * (clobbers a2-a5,SAR) - * - * dcacheattr_is_enabled label - * Branches to \label if D-cache appears to have been enabled - * (eg. if CACHEATTR contains a cache-enabled attribute). - * (clobbers a2-a5,SAR) - * - * cacheattr_is_enabled label - * Branches to \label if either I-cache or D-cache appears to have been enabled - * (eg. if CACHEATTR contains a cache-enabled attribute). - * (clobbers a2-a5,SAR) - * - * The following macros are only defined under certain conditions: - * - * icacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR) - * Writes I-cache CACHEATTR from a2 (a3-a8 clobbered). - * - * dcacheattr_set (if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR) - * Writes D-cache CACHEATTR from a2 (a3-a8 clobbered). - */ - - - -/*************************** GENERIC -- ALL CACHES ***************************/ - -/* - * _cacheattr_get - * - * (Internal macro.) - * Returns value of CACHEATTR register (or closest equivalent) in a2. - * - * Entry: - * (none) - * Exit: - * a2 value read from CACHEATTR - * a3-a5 clobbered (temporaries) - */ - .macro _cacheattr_get tlb -#if XCHAL_HAVE_CACHEATTR - rsr.cacheattr a2 -#elif XCHAL_CA_8X512 - // We have a config that "mimics" CACHEATTR using a simplified - // "MMU" composed of a single statically-mapped way. - // DTLB and ITLB are independent, so there's no single - // cache attribute that can describe both. So for now - // just return the DTLB state. - movi a5, 0xE0000000 - movi a2, 0 - movi a3, XCHAL_SPANNING_WAY -1: add a3, a3, a5 // next segment - r&tlb&1 a4, a3 // get PPN+CA of segment at 0xE0000000, 0xC0000000, ..., 0 - dsync // interlock??? - slli a2, a2, 4 - extui a4, a4, 0, 4 // extract CA - or a2, a2, a4 - bgeui a3, 16, 1b -#else - // This macro isn't applicable to arbitrary MMU configurations. - // Just return zero. - movi a2, 0 -#endif - .endm - - .macro icacheattr_get - _cacheattr_get itlb - .endm - - .macro dcacheattr_get - _cacheattr_get dtlb - .endm - - -/* Default (powerup/reset) value of CACHEATTR, - all BYPASS mode (ie. 
disabled/bypassed caches): */ -#if XCHAL_HAVE_PTP_MMU -# define XCHAL_CACHEATTR_ALL_BYPASS 0x33333333 -#else -# define XCHAL_CACHEATTR_ALL_BYPASS 0x22222222 -#endif - -#if XCHAL_CA_8X512 - -#if XCHAL_HAVE_PTP_MMU -# define XCHAL_FCA_ENAMASK 0x0AA0 /* bitmap of fetch attributes that require enabled icache */ -# define XCHAL_LCA_ENAMASK 0x0FF0 /* bitmap of load attributes that require enabled dcache */ -# define XCHAL_SCA_ENAMASK 0x0CC0 /* bitmap of store attributes that require enabled dcache */ -#else -# define XCHAL_FCA_ENAMASK 0x003A /* bitmap of fetch attributes that require enabled icache */ -# define XCHAL_LCA_ENAMASK 0x0033 /* bitmap of load attributes that require enabled dcache */ -# define XCHAL_SCA_ENAMASK 0x0033 /* bitmap of store attributes that require enabled dcache */ -#endif -#define XCHAL_LSCA_ENAMASK (XCHAL_LCA_ENAMASK|XCHAL_SCA_ENAMASK) /* l/s attrs requiring enabled dcache */ -#define XCHAL_ALLCA_ENAMASK (XCHAL_FCA_ENAMASK|XCHAL_LSCA_ENAMASK) /* all attrs requiring enabled caches */ - -/* - * _cacheattr_is_enabled - * - * (Internal macro.) - * Branches to \label if CACHEATTR in a2 indicates an enabled - * cache, using mask in a3. - * - * Parameters: - * label where to branch to if cache is enabled - * Entry: - * a2 contains CACHEATTR value used to determine whether - * caches are enabled - * a3 16-bit constant where each bit correspond to - * one of the 16 possible CA values (in a CACHEATTR mask); - * CA values that indicate the cache is enabled - * have their corresponding bit set in this mask - * (eg. use XCHAL_xCA_ENAMASK , above) - * Exit: - * a2,a4,a5 clobbered - * SAR clobbered - */ - .macro _cacheattr_is_enabled label - movi a4, 8 // loop 8 times -.Lcaife\@: - extui a5, a2, 0, 4 // get CA nibble - ssr a5 // index into mask according to CA... - srl a5, a3 // ...and get CA's mask bit in a5 bit 0 - bbsi.l a5, 0, \label // if CA indicates cache enabled, jump to label - srli a2, a2, 4 // next nibble - addi a4, a4, -1 - bnez a4, .Lcaife\@ // loop for each nibble - .endm - -#else /* XCHAL_CA_8X512 */ - .macro _cacheattr_is_enabled label - j \label // macro not applicable, assume caches always enabled - .endm -#endif /* XCHAL_CA_8X512 */ - - - -/* - * icacheattr_is_enabled - * - * Branches to \label if I-cache is enabled. - * - * Parameters: - * label where to branch to if icache is enabled - * Entry: - * (none) - * Exit: - * a2-a5, SAR clobbered (temporaries) - */ - .macro icacheattr_is_enabled label -#if XCHAL_CA_8X512 - icacheattr_get - movi a3, XCHAL_FCA_ENAMASK -#endif - _cacheattr_is_enabled \label - .endm - -/* - * dcacheattr_is_enabled - * - * Branches to \label if D-cache is enabled. - * - * Parameters: - * label where to branch to if dcache is enabled - * Entry: - * (none) - * Exit: - * a2-a5, SAR clobbered (temporaries) - */ - .macro dcacheattr_is_enabled label -#if XCHAL_CA_8X512 - dcacheattr_get - movi a3, XCHAL_LSCA_ENAMASK -#endif - _cacheattr_is_enabled \label - .endm - -/* - * cacheattr_is_enabled - * - * Branches to \label if either I-cache or D-cache is enabled. 
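For illustration (not part of the original header): _cacheattr_is_enabled above walks the eight CA nibbles of a CACHEATTR value and tests the matching bit of a 16-bit enable mask such as XCHAL_FCA_ENAMASK. A small C sketch of that check, assuming the PTP-MMU mask and all-bypass values defined above; cacheattr_any_enabled is a hypothetical helper name:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Fetch-attribute enable mask from this header (PTP MMU variant). */
#define XCHAL_FCA_ENAMASK 0x0AA0u

/* C equivalent of the _cacheattr_is_enabled loop: walk the eight CA
 * nibbles of a CACHEATTR value and report whether any of them maps to
 * a cache-enabled attribute according to the 16-bit mask. */
static bool cacheattr_any_enabled(uint32_t cacheattr, uint16_t enamask)
{
	for (int region = 0; region < 8; region++) {
		unsigned ca = cacheattr & 0xF;	/* CA value for this 512MB region */

		if (enamask & (1u << ca))
			return true;
		cacheattr >>= 4;		/* next region */
	}
	return false;
}

int main(void)
{
	/* The PTP-MMU all-bypass reset value shown above enables no cache. */
	assert(!cacheattr_any_enabled(0x33333333u, XCHAL_FCA_ENAMASK));
	/* One region with CA = 5 (bit 5 is set in 0x0AA0) counts as enabled. */
	assert(cacheattr_any_enabled(0x33333353u, XCHAL_FCA_ENAMASK));
	return 0;
}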
- * - * Parameters: - * label where to branch to if a cache is enabled - * Entry: - * (none) - * Exit: - * a2-a5, SAR clobbered (temporaries) - */ - .macro cacheattr_is_enabled label -#if XCHAL_HAVE_CACHEATTR - rsr.cacheattr a2 - movi a3, XCHAL_ALLCA_ENAMASK -#elif XCHAL_CA_8X512 - icacheattr_get - movi a3, XCHAL_FCA_ENAMASK - _cacheattr_is_enabled \label - dcacheattr_get - movi a3, XCHAL_LSCA_ENAMASK -#endif - _cacheattr_is_enabled \label - .endm - - - -/* - * The ISA does not have a defined way to change the - * instruction cache attributes of the running code, - * ie. of the memory area that encloses the current PC. - * However, each micro-architecture (or class of - * configurations within a micro-architecture) - * provides a way to deal with this issue. - * - * Here are a few macros used to implement the relevant - * approach taken. - */ - -#if XCHAL_CA_8X512 && !XCHAL_HAVE_CACHEATTR - // We have a config that "mimics" CACHEATTR using a simplified - // "MMU" composed of a single statically-mapped way. - -/* - * icacheattr_set - * - * Entry: - * a2 cacheattr value to set - * Exit: - * a2 unchanged - * a3-a8 clobbered (temporaries) - */ - .macro icacheattr_set - - movi a5, 0xE0000000 // mask of upper 3 bits - movi a6, 3f // PC where ITLB is set - movi a3, XCHAL_SPANNING_WAY // start at region 0 (0 .. 7) - mov a7, a2 // copy a2 so it doesn't get clobbered - and a6, a6, a5 // upper 3 bits of local PC area - j 3f - - // Use micro-architecture specific method. - // The following 4-instruction sequence is aligned such that - // it all fits within a single I-cache line. Sixteen byte - // alignment is sufficient for this (using XCHAL_ICACHE_LINESIZE - // actually causes problems because that can be greater than - // the alignment of the reset vector, where this macro is often - // invoked, which would cause the linker to align the reset - // vector code away from the reset vector!!). - .begin no-transform - .align 16 /*XCHAL_ICACHE_LINESIZE*/ -1: witlb a4, a3 // write wired PTE (CA, no PPN) of 512MB segment to ITLB - isync - .end no-transform - nop - nop - - sub a3, a3, a5 // next segment (add 0x20000000) - bltui a3, 16, 4f // done? - - // Note that in the WITLB loop, we don't do any load/stores - // (may not be an issue here, but it is important in the DTLB case). -2: srli a7, a7, 4 // next CA -3: -# if XCHAL_HAVE_MIMIC_CACHEATTR - extui a4, a7, 0, 4 // extract CA to set -# else /* have translation, preserve it: */ - ritlb1 a8, a3 // get current PPN+CA of segment - //dsync // interlock??? - extui a4, a7, 0, 4 // extract CA to set - srli a8, a8, 4 // clear CA but keep PPN ... - slli a8, a8, 4 // ... - add a4, a4, a8 // combine new CA with PPN to preserve -# endif - beq a3, a6, 1b // current PC's region? if so, do it in a safe way - witlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to ITLB - sub a3, a3, a5 // next segment (add 0x20000000) - bgeui a3, 16, 2b - isync // make sure all ifetch changes take effect -4: - .endm // icacheattr_set - - -/* - * dcacheattr_set - * - * Entry: - * a2 cacheattr value to set - * Exit: - * a2 unchanged - * a3-a8 clobbered (temporaries) - */ - - .macro dcacheattr_set - - movi a5, 0xE0000000 // mask of upper 3 bits - movi a3, XCHAL_SPANNING_WAY // start at region 0 (0 .. 7) - mov a7, a2 // copy a2 so it doesn't get clobbered - // Note that in the WDTLB loop, we don't do any load/stores -2: // (including implicit l32r via movi) because it isn't safe. 
-# if XCHAL_HAVE_MIMIC_CACHEATTR - extui a4, a7, 0, 4 // extract CA to set -# else /* have translation, preserve it: */ - rdtlb1 a8, a3 // get current PPN+CA of segment - //dsync // interlock??? - extui a4, a7, 0, 4 // extract CA to set - srli a8, a8, 4 // clear CA but keep PPN ... - slli a8, a8, 4 // ... - add a4, a4, a8 // combine new CA with PPN to preserve -# endif - wdtlb a4, a3 // write wired PTE (CA [+PPN]) of 512MB segment to DTLB - sub a3, a3, a5 // next segment (add 0x20000000) - srli a7, a7, 4 // next CA - bgeui a3, 16, 2b - dsync // make sure all data path changes take effect - .endm // dcacheattr_set - -#endif /* XCHAL_CA_8X512 && !XCHAL_HAVE_CACHEATTR */ - - - -/* - * cacheattr_set - * - * Macro that sets the current CACHEATTR safely - * (both i and d) according to the current contents of a2. - * It works even when changing the cache attributes of - * the currently running code. - * - * Entry: - * a2 cacheattr value to set - * Exit: - * a2 unchanged - * a3-a8 clobbered (temporaries) - */ - .macro cacheattr_set - -#if XCHAL_HAVE_CACHEATTR -# if XCHAL_ICACHE_LINESIZE < 4 - // No i-cache, so can always safely write to CACHEATTR: - wsr.cacheattr a2 -# else - // The Athens micro-architecture, when using the old - // exception architecture option (ie. with the CACHEATTR register) - // allows changing the cache attributes of the running code - // using the following exact sequence aligned to be within - // an instruction cache line. (NOTE: using XCHAL_ICACHE_LINESIZE - // alignment actually causes problems because that can be greater - // than the alignment of the reset vector, where this macro is often - // invoked, which would cause the linker to align the reset - // vector code away from the reset vector!!). - j 1f - .begin no-transform - .align 16 /*XCHAL_ICACHE_LINESIZE*/ // align to within an I-cache line -1: wsr.cacheattr a2 - isync - .end no-transform - nop - nop -# endif -#elif XCHAL_CA_8X512 - // DTLB and ITLB are independent, but to keep semantics - // of this macro we simply write to both. - icacheattr_set - dcacheattr_set -#else - // This macro isn't applicable to arbitrary MMU configurations. - // Do nothing in this case. -#endif - .endm - - -#endif /*XTENSA_CACHEATTRASM_H*/ - diff --git a/src/arch/xtensa/include/xtensa/config/core.h b/src/arch/xtensa/include/xtensa/config/core.h deleted file mode 100644 index aa18c42a947d..000000000000 --- a/src/arch/xtensa/include/xtensa/config/core.h +++ /dev/null @@ -1,1420 +0,0 @@ -/* - * xtensa/config/core.h -- HAL definitions dependent on CORE configuration - * - * This header file is sometimes referred to as the "compile-time HAL" or CHAL. - * It pulls definitions tailored for a specific Xtensa processor configuration. - * - * Sources for binaries meant to be configuration-independent generally avoid - * including this file (they may use the configuration-specific HAL library). - * It is normal for the HAL library source itself to include this file. - */ - -/* - * Copyright (c) 2005-2015 Cadence Design Systems, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - - -#ifndef XTENSA_CONFIG_CORE_H -#define XTENSA_CONFIG_CORE_H - -/* xt-clang uses UINT32_C() without import it. This affects both Zephyr and XTOS. - * See #9413 for the longer story. - */ -#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) -# ifndef UINT32_C -# define UINT32_C(x) x -# endif -#else - /* UINT32_C(x) x ## U */ -# include -#endif - -/* CONFIGURATION INDEPENDENT DEFINITIONS: */ -#ifdef __XTENSA__ -#include -#include -#else -#include "../hal.h" -#include "../xtensa-versions.h" -#endif - -/* CONFIGURATION SPECIFIC DEFINITIONS: */ -#ifdef __XTENSA__ -#include -#include -#include -#else -#include "core-isa.h" -#include "core-matmap.h" -#include "tie.h" -#endif - -#if defined (_ASMLANGUAGE) || defined (__ASSEMBLER__) -#ifdef __XTENSA__ -#include -#else -#include "tie-asm.h" -#endif -#endif /*_ASMLANGUAGE or __ASSEMBLER__*/ - - -/*---------------------------------------------------------------------- - GENERAL - ----------------------------------------------------------------------*/ - -/* - * Separators for macros that expand into arrays. - * These can be predefined by files that #include this one, - * when different separators are required. - */ -/* Element separator for macros that expand into 1-dimensional arrays: */ -#ifndef XCHAL_SEP -#define XCHAL_SEP , -#endif -/* Array separator for macros that expand into 2-dimensional arrays: */ -#ifndef XCHAL_SEP2 -#define XCHAL_SEP2 },{ -#endif - - -/*---------------------------------------------------------------------- - ERRATA - ----------------------------------------------------------------------*/ - -/* - * Erratum T1020.H13, T1030.H7, T1040.H10, T1050.H4 (fixed in T1040.3 and T1050.1; - * relevant only in XEA1, kernel-vector mode, level-one interrupts and overflows enabled): - */ -#define XCHAL_MAYHAVE_ERRATUM_XEA1KWIN (XCHAL_HAVE_XEA1 && \ - (XCHAL_HW_RELEASE_AT_OR_BELOW(1040,2) != 0 \ - || XCHAL_HW_RELEASE_AT(1050,0))) -/* - * Erratum 453 present in RE-2013.2 up to RF-2014.0, fixed in RF-2014.1. - * Applies to specific set of configuration options. - * Part of the workaround is to add ISYNC at certain points in the code. - * The workaround gated by this macro can be disabled if not needed, e.g. if - * zero-overhead loop buffer will be disabled, by defining _NO_ERRATUM_453. 
- */ -#if ( XCHAL_HW_MAX_VERSION >= XTENSA_HWVERSION_RE_2013_2 && \ - XCHAL_HW_MIN_VERSION <= XTENSA_HWVERSION_RF_2014_0 && \ - XCHAL_ICACHE_SIZE != 0 && XCHAL_HAVE_PIF /*covers also AXI/AHB*/ && \ - XCHAL_HAVE_LOOPS && XCHAL_LOOP_BUFFER_SIZE != 0 && \ - XCHAL_CLOCK_GATING_GLOBAL && !defined(_NO_ERRATUM_453) ) -#define XCHAL_ERRATUM_453 1 -#else -#define XCHAL_ERRATUM_453 0 -#endif - -/* - * Erratum 497 present in RE-2012.2 up to RG/RF-2015.2 - * Applies to specific set of configuration options. - * Workaround is to add MEMWs after at most 8 cache WB instructions - */ -#if ( ((XCHAL_HW_MAX_VERSION >= XTENSA_HWVERSION_RE_2012_0 && \ - XCHAL_HW_MIN_VERSION <= XTENSA_HWVERSION_RF_2015_2) || \ - (XCHAL_HW_MAX_VERSION >= XTENSA_HWVERSION_RG_2015_0 && \ - XCHAL_HW_MIN_VERSION <= XTENSA_HWVERSION_RG_2015_2) \ - ) && \ - XCHAL_DCACHE_IS_WRITEBACK && \ - XCHAL_HAVE_AXI && \ - XCHAL_HAVE_PIF_WR_RESP && \ - XCHAL_HAVE_PIF_REQ_ATTR && !defined(_NO_ERRATUM_497) \ - ) -#define XCHAL_ERRATUM_497 1 -#else -#define XCHAL_ERRATUM_497 0 -#endif - - -/*---------------------------------------------------------------------- - ISA - ----------------------------------------------------------------------*/ - -#if XCHAL_HAVE_BE -# define XCHAL_HAVE_LE 0 -# define XCHAL_MEMORY_ORDER XTHAL_BIGENDIAN -#else -# define XCHAL_HAVE_LE 1 -# define XCHAL_MEMORY_ORDER XTHAL_LITTLEENDIAN -#endif - - - -/*---------------------------------------------------------------------- - INTERRUPTS - ----------------------------------------------------------------------*/ - -/* Indexing macros: */ -#define _XCHAL_INTLEVEL_MASK(n) XCHAL_INTLEVEL ## n ## _MASK -#define XCHAL_INTLEVEL_MASK(n) _XCHAL_INTLEVEL_MASK(n) /* n = 0 .. 15 */ -#define _XCHAL_INTLEVEL_ANDBELOWMASK(n) XCHAL_INTLEVEL ## n ## _ANDBELOW_MASK -#define XCHAL_INTLEVEL_ANDBELOW_MASK(n) _XCHAL_INTLEVEL_ANDBELOWMASK(n) /* n = 0 .. 15 */ -#define _XCHAL_INTLEVEL_NUM(n) XCHAL_INTLEVEL ## n ## _NUM -#define XCHAL_INTLEVEL_NUM(n) _XCHAL_INTLEVEL_NUM(n) /* n = 0 .. 15 */ -#define _XCHAL_INT_LEVEL(n) XCHAL_INT ## n ## _LEVEL -#define XCHAL_INT_LEVEL(n) _XCHAL_INT_LEVEL(n) /* n = 0 .. 31 */ -#define _XCHAL_INT_TYPE(n) XCHAL_INT ## n ## _TYPE -#define XCHAL_INT_TYPE(n) _XCHAL_INT_TYPE(n) /* n = 0 .. 31 */ -#define _XCHAL_TIMER_INTERRUPT(n) XCHAL_TIMER ## n ## _INTERRUPT -#define XCHAL_TIMER_INTERRUPT(n) _XCHAL_TIMER_INTERRUPT(n) /* n = 0 .. 
3 */ - - -#define XCHAL_HAVE_HIGHLEVEL_INTERRUPTS XCHAL_HAVE_HIGHPRI_INTERRUPTS -#define XCHAL_NUM_LOWPRI_LEVELS 1 /* number of low-priority interrupt levels (always 1) */ -#define XCHAL_FIRST_HIGHPRI_LEVEL (XCHAL_NUM_LOWPRI_LEVELS+1) /* level of first high-priority interrupt (always 2) */ -/* Note: 1 <= LOWPRI_LEVELS <= EXCM_LEVEL < DEBUGLEVEL <= NUM_INTLEVELS < NMILEVEL <= 15 */ - -/* These values are constant for existing Xtensa processor implementations: */ -#define XCHAL_INTLEVEL0_MASK 0x00000000 -#define XCHAL_INTLEVEL8_MASK 0x00000000 -#define XCHAL_INTLEVEL9_MASK 0x00000000 -#define XCHAL_INTLEVEL10_MASK 0x00000000 -#define XCHAL_INTLEVEL11_MASK 0x00000000 -#define XCHAL_INTLEVEL12_MASK 0x00000000 -#define XCHAL_INTLEVEL13_MASK 0x00000000 -#define XCHAL_INTLEVEL14_MASK 0x00000000 -#define XCHAL_INTLEVEL15_MASK 0x00000000 - -/* Array of masks of interrupts at each interrupt level: */ -#define XCHAL_INTLEVEL_MASKS XCHAL_INTLEVEL0_MASK \ - XCHAL_SEP XCHAL_INTLEVEL1_MASK \ - XCHAL_SEP XCHAL_INTLEVEL2_MASK \ - XCHAL_SEP XCHAL_INTLEVEL3_MASK \ - XCHAL_SEP XCHAL_INTLEVEL4_MASK \ - XCHAL_SEP XCHAL_INTLEVEL5_MASK \ - XCHAL_SEP XCHAL_INTLEVEL6_MASK \ - XCHAL_SEP XCHAL_INTLEVEL7_MASK \ - XCHAL_SEP XCHAL_INTLEVEL8_MASK \ - XCHAL_SEP XCHAL_INTLEVEL9_MASK \ - XCHAL_SEP XCHAL_INTLEVEL10_MASK \ - XCHAL_SEP XCHAL_INTLEVEL11_MASK \ - XCHAL_SEP XCHAL_INTLEVEL12_MASK \ - XCHAL_SEP XCHAL_INTLEVEL13_MASK \ - XCHAL_SEP XCHAL_INTLEVEL14_MASK \ - XCHAL_SEP XCHAL_INTLEVEL15_MASK - -/* These values are constant for existing Xtensa processor implementations: */ -#define XCHAL_INTLEVEL0_ANDBELOW_MASK 0x00000000 -#define XCHAL_INTLEVEL8_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL9_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL10_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL11_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL12_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL13_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL14_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK -#define XCHAL_INTLEVEL15_ANDBELOW_MASK XCHAL_INTLEVEL7_ANDBELOW_MASK - -/* Mask of all low-priority interrupts: */ -#define XCHAL_LOWPRI_MASK XCHAL_INTLEVEL1_ANDBELOW_MASK - -/* Mask of all interrupts masked by PS.EXCM (or CEXCM): */ -#define XCHAL_EXCM_MASK XCHAL_INTLEVEL_ANDBELOW_MASK(XCHAL_EXCM_LEVEL) - -/* Array of masks of interrupts at each range 1..n of interrupt levels: */ -#define XCHAL_INTLEVEL_ANDBELOW_MASKS XCHAL_INTLEVEL0_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL1_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL2_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL3_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL4_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL5_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL6_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL7_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL8_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL9_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL10_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL11_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL12_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL13_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL14_ANDBELOW_MASK \ - XCHAL_SEP XCHAL_INTLEVEL15_ANDBELOW_MASK - -#if 0 /*XCHAL_HAVE_NMI*/ -/* NMI "interrupt level" (for use with EXCSAVE_n, EPS_n, EPC_n, RFI n): */ -# define XCHAL_NMILEVEL (XCHAL_NUM_INTLEVELS+1) -#endif - -/* Array of levels of each possible interrupt: */ -#define XCHAL_INT_LEVELS XCHAL_INT0_LEVEL \ - XCHAL_SEP XCHAL_INT1_LEVEL \ - XCHAL_SEP XCHAL_INT2_LEVEL \ - 
XCHAL_SEP XCHAL_INT3_LEVEL \ - XCHAL_SEP XCHAL_INT4_LEVEL \ - XCHAL_SEP XCHAL_INT5_LEVEL \ - XCHAL_SEP XCHAL_INT6_LEVEL \ - XCHAL_SEP XCHAL_INT7_LEVEL \ - XCHAL_SEP XCHAL_INT8_LEVEL \ - XCHAL_SEP XCHAL_INT9_LEVEL \ - XCHAL_SEP XCHAL_INT10_LEVEL \ - XCHAL_SEP XCHAL_INT11_LEVEL \ - XCHAL_SEP XCHAL_INT12_LEVEL \ - XCHAL_SEP XCHAL_INT13_LEVEL \ - XCHAL_SEP XCHAL_INT14_LEVEL \ - XCHAL_SEP XCHAL_INT15_LEVEL \ - XCHAL_SEP XCHAL_INT16_LEVEL \ - XCHAL_SEP XCHAL_INT17_LEVEL \ - XCHAL_SEP XCHAL_INT18_LEVEL \ - XCHAL_SEP XCHAL_INT19_LEVEL \ - XCHAL_SEP XCHAL_INT20_LEVEL \ - XCHAL_SEP XCHAL_INT21_LEVEL \ - XCHAL_SEP XCHAL_INT22_LEVEL \ - XCHAL_SEP XCHAL_INT23_LEVEL \ - XCHAL_SEP XCHAL_INT24_LEVEL \ - XCHAL_SEP XCHAL_INT25_LEVEL \ - XCHAL_SEP XCHAL_INT26_LEVEL \ - XCHAL_SEP XCHAL_INT27_LEVEL \ - XCHAL_SEP XCHAL_INT28_LEVEL \ - XCHAL_SEP XCHAL_INT29_LEVEL \ - XCHAL_SEP XCHAL_INT30_LEVEL \ - XCHAL_SEP XCHAL_INT31_LEVEL - -/* Array of types of each possible interrupt: */ -#define XCHAL_INT_TYPES XCHAL_INT0_TYPE \ - XCHAL_SEP XCHAL_INT1_TYPE \ - XCHAL_SEP XCHAL_INT2_TYPE \ - XCHAL_SEP XCHAL_INT3_TYPE \ - XCHAL_SEP XCHAL_INT4_TYPE \ - XCHAL_SEP XCHAL_INT5_TYPE \ - XCHAL_SEP XCHAL_INT6_TYPE \ - XCHAL_SEP XCHAL_INT7_TYPE \ - XCHAL_SEP XCHAL_INT8_TYPE \ - XCHAL_SEP XCHAL_INT9_TYPE \ - XCHAL_SEP XCHAL_INT10_TYPE \ - XCHAL_SEP XCHAL_INT11_TYPE \ - XCHAL_SEP XCHAL_INT12_TYPE \ - XCHAL_SEP XCHAL_INT13_TYPE \ - XCHAL_SEP XCHAL_INT14_TYPE \ - XCHAL_SEP XCHAL_INT15_TYPE \ - XCHAL_SEP XCHAL_INT16_TYPE \ - XCHAL_SEP XCHAL_INT17_TYPE \ - XCHAL_SEP XCHAL_INT18_TYPE \ - XCHAL_SEP XCHAL_INT19_TYPE \ - XCHAL_SEP XCHAL_INT20_TYPE \ - XCHAL_SEP XCHAL_INT21_TYPE \ - XCHAL_SEP XCHAL_INT22_TYPE \ - XCHAL_SEP XCHAL_INT23_TYPE \ - XCHAL_SEP XCHAL_INT24_TYPE \ - XCHAL_SEP XCHAL_INT25_TYPE \ - XCHAL_SEP XCHAL_INT26_TYPE \ - XCHAL_SEP XCHAL_INT27_TYPE \ - XCHAL_SEP XCHAL_INT28_TYPE \ - XCHAL_SEP XCHAL_INT29_TYPE \ - XCHAL_SEP XCHAL_INT30_TYPE \ - XCHAL_SEP XCHAL_INT31_TYPE - -/* Array of masks of interrupts for each type of interrupt: */ -#define XCHAL_INTTYPE_MASKS XCHAL_INTTYPE_MASK_UNCONFIGURED \ - XCHAL_SEP XCHAL_INTTYPE_MASK_SOFTWARE \ - XCHAL_SEP XCHAL_INTTYPE_MASK_EXTERN_EDGE \ - XCHAL_SEP XCHAL_INTTYPE_MASK_EXTERN_LEVEL \ - XCHAL_SEP XCHAL_INTTYPE_MASK_TIMER \ - XCHAL_SEP XCHAL_INTTYPE_MASK_NMI \ - XCHAL_SEP XCHAL_INTTYPE_MASK_WRITE_ERROR \ - XCHAL_SEP XCHAL_INTTYPE_MASK_IDMA_DONE \ - XCHAL_SEP XCHAL_INTTYPE_MASK_IDMA_ERR \ - XCHAL_SEP XCHAL_INTTYPE_MASK_GS_ERR - -/* Interrupts that can be cleared using the INTCLEAR special register: */ -#define XCHAL_INTCLEARABLE_MASK (XCHAL_INTTYPE_MASK_SOFTWARE+XCHAL_INTTYPE_MASK_EXTERN_EDGE+XCHAL_INTTYPE_MASK_WRITE_ERROR) -/* Interrupts that can be triggered using the INTSET special register: */ -#define XCHAL_INTSETTABLE_MASK XCHAL_INTTYPE_MASK_SOFTWARE - -/* Array of interrupts assigned to each timer (CCOMPARE0 to CCOMPARE3): */ -#define XCHAL_TIMER_INTERRUPTS XCHAL_TIMER0_INTERRUPT \ - XCHAL_SEP XCHAL_TIMER1_INTERRUPT \ - XCHAL_SEP XCHAL_TIMER2_INTERRUPT \ - XCHAL_SEP XCHAL_TIMER3_INTERRUPT - - - -/* For backward compatibility and for the array macros, define macros for - * each unconfigured interrupt number (unfortunately, the value of - * XTHAL_INTTYPE_UNCONFIGURED is not zero): */ -#if XCHAL_NUM_INTERRUPTS == 0 -# define XCHAL_INT0_LEVEL 0 -# define XCHAL_INT0_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 1 -# define XCHAL_INT1_LEVEL 0 -# define XCHAL_INT1_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 2 -# define XCHAL_INT2_LEVEL 0 -# 
define XCHAL_INT2_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 3 -# define XCHAL_INT3_LEVEL 0 -# define XCHAL_INT3_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 4 -# define XCHAL_INT4_LEVEL 0 -# define XCHAL_INT4_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 5 -# define XCHAL_INT5_LEVEL 0 -# define XCHAL_INT5_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 6 -# define XCHAL_INT6_LEVEL 0 -# define XCHAL_INT6_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 7 -# define XCHAL_INT7_LEVEL 0 -# define XCHAL_INT7_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 8 -# define XCHAL_INT8_LEVEL 0 -# define XCHAL_INT8_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 9 -# define XCHAL_INT9_LEVEL 0 -# define XCHAL_INT9_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 10 -# define XCHAL_INT10_LEVEL 0 -# define XCHAL_INT10_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 11 -# define XCHAL_INT11_LEVEL 0 -# define XCHAL_INT11_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 12 -# define XCHAL_INT12_LEVEL 0 -# define XCHAL_INT12_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 13 -# define XCHAL_INT13_LEVEL 0 -# define XCHAL_INT13_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 14 -# define XCHAL_INT14_LEVEL 0 -# define XCHAL_INT14_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 15 -# define XCHAL_INT15_LEVEL 0 -# define XCHAL_INT15_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 16 -# define XCHAL_INT16_LEVEL 0 -# define XCHAL_INT16_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 17 -# define XCHAL_INT17_LEVEL 0 -# define XCHAL_INT17_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 18 -# define XCHAL_INT18_LEVEL 0 -# define XCHAL_INT18_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 19 -# define XCHAL_INT19_LEVEL 0 -# define XCHAL_INT19_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 20 -# define XCHAL_INT20_LEVEL 0 -# define XCHAL_INT20_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 21 -# define XCHAL_INT21_LEVEL 0 -# define XCHAL_INT21_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 22 -# define XCHAL_INT22_LEVEL 0 -# define XCHAL_INT22_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 23 -# define XCHAL_INT23_LEVEL 0 -# define XCHAL_INT23_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 24 -# define XCHAL_INT24_LEVEL 0 -# define XCHAL_INT24_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 25 -# define XCHAL_INT25_LEVEL 0 -# define XCHAL_INT25_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 26 -# define XCHAL_INT26_LEVEL 0 -# define XCHAL_INT26_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 27 -# define XCHAL_INT27_LEVEL 0 -# define XCHAL_INT27_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 28 -# define XCHAL_INT28_LEVEL 0 -# define XCHAL_INT28_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 29 -# define XCHAL_INT29_LEVEL 0 -# define XCHAL_INT29_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 30 -# define XCHAL_INT30_LEVEL 0 -# define XCHAL_INT30_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif -#if XCHAL_NUM_INTERRUPTS <= 31 -# define 
XCHAL_INT31_LEVEL 0 -# define XCHAL_INT31_TYPE XTHAL_INTTYPE_UNCONFIGURED -#endif - - -/* - * Masks and levels corresponding to each *external* interrupt. - */ - -#define XCHAL_EXTINT0_MASK (1 << XCHAL_EXTINT0_NUM) -#define XCHAL_EXTINT0_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT0_NUM) -#define XCHAL_EXTINT1_MASK (1 << XCHAL_EXTINT1_NUM) -#define XCHAL_EXTINT1_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT1_NUM) -#define XCHAL_EXTINT2_MASK (1 << XCHAL_EXTINT2_NUM) -#define XCHAL_EXTINT2_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT2_NUM) -#define XCHAL_EXTINT3_MASK (1 << XCHAL_EXTINT3_NUM) -#define XCHAL_EXTINT3_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT3_NUM) -#define XCHAL_EXTINT4_MASK (1 << XCHAL_EXTINT4_NUM) -#define XCHAL_EXTINT4_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT4_NUM) -#define XCHAL_EXTINT5_MASK (1 << XCHAL_EXTINT5_NUM) -#define XCHAL_EXTINT5_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT5_NUM) -#define XCHAL_EXTINT6_MASK (1 << XCHAL_EXTINT6_NUM) -#define XCHAL_EXTINT6_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT6_NUM) -#define XCHAL_EXTINT7_MASK (1 << XCHAL_EXTINT7_NUM) -#define XCHAL_EXTINT7_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT7_NUM) -#define XCHAL_EXTINT8_MASK (1 << XCHAL_EXTINT8_NUM) -#define XCHAL_EXTINT8_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT8_NUM) -#define XCHAL_EXTINT9_MASK (1 << XCHAL_EXTINT9_NUM) -#define XCHAL_EXTINT9_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT9_NUM) -#define XCHAL_EXTINT10_MASK (1 << XCHAL_EXTINT10_NUM) -#define XCHAL_EXTINT10_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT10_NUM) -#define XCHAL_EXTINT11_MASK (1 << XCHAL_EXTINT11_NUM) -#define XCHAL_EXTINT11_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT11_NUM) -#define XCHAL_EXTINT12_MASK (1 << XCHAL_EXTINT12_NUM) -#define XCHAL_EXTINT12_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT12_NUM) -#define XCHAL_EXTINT13_MASK (1 << XCHAL_EXTINT13_NUM) -#define XCHAL_EXTINT13_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT13_NUM) -#define XCHAL_EXTINT14_MASK (1 << XCHAL_EXTINT14_NUM) -#define XCHAL_EXTINT14_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT14_NUM) -#define XCHAL_EXTINT15_MASK (1 << XCHAL_EXTINT15_NUM) -#define XCHAL_EXTINT15_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT15_NUM) -#define XCHAL_EXTINT16_MASK (1 << XCHAL_EXTINT16_NUM) -#define XCHAL_EXTINT16_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT16_NUM) -#define XCHAL_EXTINT17_MASK (1 << XCHAL_EXTINT17_NUM) -#define XCHAL_EXTINT17_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT17_NUM) -#define XCHAL_EXTINT18_MASK (1 << XCHAL_EXTINT18_NUM) -#define XCHAL_EXTINT18_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT18_NUM) -#define XCHAL_EXTINT19_MASK (1 << XCHAL_EXTINT19_NUM) -#define XCHAL_EXTINT19_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT19_NUM) -#define XCHAL_EXTINT20_MASK (1 << XCHAL_EXTINT20_NUM) -#define XCHAL_EXTINT20_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT20_NUM) -#define XCHAL_EXTINT21_MASK (1 << XCHAL_EXTINT21_NUM) -#define XCHAL_EXTINT21_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT21_NUM) -#define XCHAL_EXTINT22_MASK (1 << XCHAL_EXTINT22_NUM) -#define XCHAL_EXTINT22_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT22_NUM) -#define XCHAL_EXTINT23_MASK (1 << XCHAL_EXTINT23_NUM) -#define XCHAL_EXTINT23_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT23_NUM) -#define XCHAL_EXTINT24_MASK (1 << XCHAL_EXTINT24_NUM) -#define XCHAL_EXTINT24_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT24_NUM) -#define XCHAL_EXTINT25_MASK (1 << XCHAL_EXTINT25_NUM) -#define XCHAL_EXTINT25_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT25_NUM) -#define XCHAL_EXTINT26_MASK (1 << XCHAL_EXTINT26_NUM) -#define XCHAL_EXTINT26_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT26_NUM) -#define XCHAL_EXTINT27_MASK (1 << XCHAL_EXTINT27_NUM) -#define XCHAL_EXTINT27_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT27_NUM) -#define XCHAL_EXTINT28_MASK (1 << 
XCHAL_EXTINT28_NUM) -#define XCHAL_EXTINT28_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT28_NUM) -#define XCHAL_EXTINT29_MASK (1 << XCHAL_EXTINT29_NUM) -#define XCHAL_EXTINT29_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT29_NUM) -#define XCHAL_EXTINT30_MASK (1 << XCHAL_EXTINT30_NUM) -#define XCHAL_EXTINT30_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT30_NUM) -#define XCHAL_EXTINT31_MASK (1 << XCHAL_EXTINT31_NUM) -#define XCHAL_EXTINT31_LEVEL XCHAL_INT_LEVEL(XCHAL_EXTINT31_NUM) - - -/*---------------------------------------------------------------------- - EXCEPTIONS and VECTORS - ----------------------------------------------------------------------*/ - -/* For backward compatibility ONLY -- DO NOT USE (will be removed in future release): */ -#define XCHAL_HAVE_OLD_EXC_ARCH XCHAL_HAVE_XEA1 /* (DEPRECATED) 1 if old exception architecture (XEA1), 0 otherwise (eg. XEA2) */ -#define XCHAL_HAVE_EXCM XCHAL_HAVE_XEA2 /* (DEPRECATED) 1 if PS.EXCM bit exists (currently equals XCHAL_HAVE_TLBS) */ -#ifdef XCHAL_USER_VECTOR_VADDR -#define XCHAL_PROGRAMEXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR -#define XCHAL_USEREXC_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR -#endif -#ifdef XCHAL_USER_VECTOR_PADDR -# define XCHAL_PROGRAMEXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR -# define XCHAL_USEREXC_VECTOR_PADDR XCHAL_USER_VECTOR_PADDR -#endif -#ifdef XCHAL_KERNEL_VECTOR_VADDR -# define XCHAL_STACKEDEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR -# define XCHAL_KERNELEXC_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR -#endif -#ifdef XCHAL_KERNEL_VECTOR_PADDR -# define XCHAL_STACKEDEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR -# define XCHAL_KERNELEXC_VECTOR_PADDR XCHAL_KERNEL_VECTOR_PADDR -#endif - -#if 0 -#if XCHAL_HAVE_DEBUG -# define XCHAL_DEBUG_VECTOR_VADDR XCHAL_INTLEVEL_VECTOR_VADDR(XCHAL_DEBUGLEVEL) -/* This one should only get defined if the corresponding intlevel paddr macro exists: */ -# define XCHAL_DEBUG_VECTOR_PADDR XCHAL_INTLEVEL_VECTOR_PADDR(XCHAL_DEBUGLEVEL) -#endif -#endif - -/* Indexing macros: */ -#define _XCHAL_INTLEVEL_VECTOR_VADDR(n) XCHAL_INTLEVEL ## n ## _VECTOR_VADDR -#define XCHAL_INTLEVEL_VECTOR_VADDR(n) _XCHAL_INTLEVEL_VECTOR_VADDR(n) /* n = 0 .. 15 */ - -/* - * General Exception Causes - * (values of EXCCAUSE special register set by general exceptions, - * which vector to the user, kernel, or double-exception vectors). - * - * DEPRECATED. Please use the equivalent EXCCAUSE_xxx macros - * defined in . (Note that these have slightly - * different names, they don't just have the XCHAL_ prefix removed.) 
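 *
 * Editor's sketch (not part of the original header): exception handlers
 * typically read EXCCAUSE and dispatch on the values defined below, e.g.
 *
 *	unsigned cause;
 *	__asm__ volatile("rsr.exccause %0" : "=a"(cause));
 *	if (cause == XCHAL_EXCCAUSE_LOAD_STORE_ERROR)
 *		handle_bus_error();	// hypothetical handler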
- */ -#define XCHAL_EXCCAUSE_ILLEGAL_INSTRUCTION 0 /* Illegal Instruction */ -#define XCHAL_EXCCAUSE_SYSTEM_CALL 1 /* System Call */ -#define XCHAL_EXCCAUSE_INSTRUCTION_FETCH_ERROR 2 /* Instruction Fetch Error */ -#define XCHAL_EXCCAUSE_LOAD_STORE_ERROR 3 /* Load Store Error */ -#define XCHAL_EXCCAUSE_LEVEL1_INTERRUPT 4 /* Level 1 Interrupt */ -#define XCHAL_EXCCAUSE_ALLOCA 5 /* Stack Extension Assist */ -#define XCHAL_EXCCAUSE_INTEGER_DIVIDE_BY_ZERO 6 /* Integer Divide by Zero */ -#define XCHAL_EXCCAUSE_SPECULATION 7 /* Speculation */ -#define XCHAL_EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction */ -#define XCHAL_EXCCAUSE_UNALIGNED 9 /* Unaligned Load Store */ -/*10..15 reserved*/ -#define XCHAL_EXCCAUSE_ITLB_MISS 16 /* ITlb Miss Exception */ -#define XCHAL_EXCCAUSE_ITLB_MULTIHIT 17 /* ITlb Mutltihit Exception */ -#define XCHAL_EXCCAUSE_ITLB_PRIVILEGE 18 /* ITlb Privilege Exception */ -#define XCHAL_EXCCAUSE_ITLB_SIZE_RESTRICTION 19 /* ITlb Size Restriction Exception */ -#define XCHAL_EXCCAUSE_FETCH_CACHE_ATTRIBUTE 20 /* Fetch Cache Attribute Exception */ -/*21..23 reserved*/ -#define XCHAL_EXCCAUSE_DTLB_MISS 24 /* DTlb Miss Exception */ -#define XCHAL_EXCCAUSE_DTLB_MULTIHIT 25 /* DTlb Multihit Exception */ -#define XCHAL_EXCCAUSE_DTLB_PRIVILEGE 26 /* DTlb Privilege Exception */ -#define XCHAL_EXCCAUSE_DTLB_SIZE_RESTRICTION 27 /* DTlb Size Restriction Exception */ -#define XCHAL_EXCCAUSE_LOAD_CACHE_ATTRIBUTE 28 /* Load Cache Attribute Exception */ -#define XCHAL_EXCCAUSE_STORE_CACHE_ATTRIBUTE 29 /* Store Cache Attribute Exception */ -/*30..31 reserved*/ -#define XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED 32 /* Coprocessor 0 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR1_DISABLED 33 /* Coprocessor 1 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR2_DISABLED 34 /* Coprocessor 2 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR3_DISABLED 35 /* Coprocessor 3 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR4_DISABLED 36 /* Coprocessor 4 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR5_DISABLED 37 /* Coprocessor 5 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR6_DISABLED 38 /* Coprocessor 6 disabled */ -#define XCHAL_EXCCAUSE_COPROCESSOR7_DISABLED 39 /* Coprocessor 7 disabled */ -/*40..63 reserved*/ - - -/* - * Miscellaneous special register fields. - * - * For each special register, and each field within each register: - * XCHAL__VALIDMASK is the set of bits defined in the register. - * XCHAL___BITS is the number of bits in the field. - * XCHAL___NUM is 2^bits, the number of possible values - * of the field. - * XCHAL___SHIFT is the position of the field within - * the register, starting from the least significant bit. - * - * DEPRECATED. Please use the equivalent macros defined in - * . (Note that these have different names.) 
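 *
 * Editor's sketch (not part of the original header): the field macros are
 * meant to be combined as mask-then-shift, e.g. extracting fields from a
 * saved PS value:
 *
 *	unsigned intlevel = (ps & XCHAL_PS_INTLEVEL_MASK) >> XCHAL_PS_INTLEVEL_SHIFT;
 *	unsigned ring     = (ps & XCHAL_PS_RING_MASK)     >> XCHAL_PS_RING_SHIFT;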
- */ - -/* DBREAKC (special register number 160): */ -#define XCHAL_DBREAKC_VALIDMASK 0xC000003F -#define XCHAL_DBREAKC_MASK_BITS 6 -#define XCHAL_DBREAKC_MASK_NUM 64 -#define XCHAL_DBREAKC_MASK_SHIFT 0 -#define XCHAL_DBREAKC_MASK_MASK 0x0000003F -#define XCHAL_DBREAKC_LOADBREAK_BITS 1 -#define XCHAL_DBREAKC_LOADBREAK_NUM 2 -#define XCHAL_DBREAKC_LOADBREAK_SHIFT 30 -#define XCHAL_DBREAKC_LOADBREAK_MASK 0x40000000 -#define XCHAL_DBREAKC_STOREBREAK_BITS 1 -#define XCHAL_DBREAKC_STOREBREAK_NUM 2 -#define XCHAL_DBREAKC_STOREBREAK_SHIFT 31 -#define XCHAL_DBREAKC_STOREBREAK_MASK 0x80000000 -/* PS (special register number 230): */ -#define XCHAL_PS_VALIDMASK 0x00070F3F -#define XCHAL_PS_INTLEVEL_BITS 4 -#define XCHAL_PS_INTLEVEL_NUM 16 -#define XCHAL_PS_INTLEVEL_SHIFT 0 -#define XCHAL_PS_INTLEVEL_MASK 0x0000000F -#define XCHAL_PS_EXCM_BITS 1 -#define XCHAL_PS_EXCM_NUM 2 -#define XCHAL_PS_EXCM_SHIFT 4 -#define XCHAL_PS_EXCM_MASK 0x00000010 -#define XCHAL_PS_UM_BITS 1 -#define XCHAL_PS_UM_NUM 2 -#define XCHAL_PS_UM_SHIFT 5 -#define XCHAL_PS_UM_MASK 0x00000020 -#define XCHAL_PS_RING_BITS 2 -#define XCHAL_PS_RING_NUM 4 -#define XCHAL_PS_RING_SHIFT 6 -#define XCHAL_PS_RING_MASK 0x000000C0 -#define XCHAL_PS_OWB_BITS 4 -#define XCHAL_PS_OWB_NUM 16 -#define XCHAL_PS_OWB_SHIFT 8 -#define XCHAL_PS_OWB_MASK 0x00000F00 -#define XCHAL_PS_CALLINC_BITS 2 -#define XCHAL_PS_CALLINC_NUM 4 -#define XCHAL_PS_CALLINC_SHIFT 16 -#define XCHAL_PS_CALLINC_MASK 0x00030000 -#define XCHAL_PS_WOE_BITS 1 -#define XCHAL_PS_WOE_NUM 2 -#define XCHAL_PS_WOE_SHIFT 18 -#define XCHAL_PS_WOE_MASK 0x00040000 -/* EXCCAUSE (special register number 232): */ -#define XCHAL_EXCCAUSE_VALIDMASK 0x0000003F -#define XCHAL_EXCCAUSE_BITS 6 -#define XCHAL_EXCCAUSE_NUM 64 -#define XCHAL_EXCCAUSE_SHIFT 0 -#define XCHAL_EXCCAUSE_MASK 0x0000003F -/* DEBUGCAUSE (special register number 233): */ -#define XCHAL_DEBUGCAUSE_VALIDMASK 0x0000003F -#define XCHAL_DEBUGCAUSE_ICOUNT_BITS 1 -#define XCHAL_DEBUGCAUSE_ICOUNT_NUM 2 -#define XCHAL_DEBUGCAUSE_ICOUNT_SHIFT 0 -#define XCHAL_DEBUGCAUSE_ICOUNT_MASK 0x00000001 -#define XCHAL_DEBUGCAUSE_IBREAK_BITS 1 -#define XCHAL_DEBUGCAUSE_IBREAK_NUM 2 -#define XCHAL_DEBUGCAUSE_IBREAK_SHIFT 1 -#define XCHAL_DEBUGCAUSE_IBREAK_MASK 0x00000002 -#define XCHAL_DEBUGCAUSE_DBREAK_BITS 1 -#define XCHAL_DEBUGCAUSE_DBREAK_NUM 2 -#define XCHAL_DEBUGCAUSE_DBREAK_SHIFT 2 -#define XCHAL_DEBUGCAUSE_DBREAK_MASK 0x00000004 -#define XCHAL_DEBUGCAUSE_BREAK_BITS 1 -#define XCHAL_DEBUGCAUSE_BREAK_NUM 2 -#define XCHAL_DEBUGCAUSE_BREAK_SHIFT 3 -#define XCHAL_DEBUGCAUSE_BREAK_MASK 0x00000008 -#define XCHAL_DEBUGCAUSE_BREAKN_BITS 1 -#define XCHAL_DEBUGCAUSE_BREAKN_NUM 2 -#define XCHAL_DEBUGCAUSE_BREAKN_SHIFT 4 -#define XCHAL_DEBUGCAUSE_BREAKN_MASK 0x00000010 -#define XCHAL_DEBUGCAUSE_DEBUGINT_BITS 1 -#define XCHAL_DEBUGCAUSE_DEBUGINT_NUM 2 -#define XCHAL_DEBUGCAUSE_DEBUGINT_SHIFT 5 -#define XCHAL_DEBUGCAUSE_DEBUGINT_MASK 0x00000020 - - - - -/*---------------------------------------------------------------------- - TIMERS - ----------------------------------------------------------------------*/ - -/*#define XCHAL_HAVE_TIMERS XCHAL_HAVE_CCOUNT*/ - - - -/*---------------------------------------------------------------------- - INTERNAL I/D RAM/ROMs and XLMI - ----------------------------------------------------------------------*/ - -#define XCHAL_NUM_IROM XCHAL_NUM_INSTROM /* (DEPRECATED) */ -#define XCHAL_NUM_IRAM XCHAL_NUM_INSTRAM /* (DEPRECATED) */ -#define XCHAL_NUM_DROM XCHAL_NUM_DATAROM /* (DEPRECATED) */ -#define XCHAL_NUM_DRAM 
XCHAL_NUM_DATARAM /* (DEPRECATED) */ - -#define XCHAL_IROM0_VADDR XCHAL_INSTROM0_VADDR /* (DEPRECATED) */ -#define XCHAL_IROM0_PADDR XCHAL_INSTROM0_PADDR /* (DEPRECATED) */ -#define XCHAL_IROM0_SIZE XCHAL_INSTROM0_SIZE /* (DEPRECATED) */ -#define XCHAL_IROM1_VADDR XCHAL_INSTROM1_VADDR /* (DEPRECATED) */ -#define XCHAL_IROM1_PADDR XCHAL_INSTROM1_PADDR /* (DEPRECATED) */ -#define XCHAL_IROM1_SIZE XCHAL_INSTROM1_SIZE /* (DEPRECATED) */ -#define XCHAL_IRAM0_VADDR XCHAL_INSTRAM0_VADDR /* (DEPRECATED) */ -#define XCHAL_IRAM0_PADDR XCHAL_INSTRAM0_PADDR /* (DEPRECATED) */ -#define XCHAL_IRAM0_SIZE XCHAL_INSTRAM0_SIZE /* (DEPRECATED) */ -#define XCHAL_IRAM1_VADDR XCHAL_INSTRAM1_VADDR /* (DEPRECATED) */ -#define XCHAL_IRAM1_PADDR XCHAL_INSTRAM1_PADDR /* (DEPRECATED) */ -#define XCHAL_IRAM1_SIZE XCHAL_INSTRAM1_SIZE /* (DEPRECATED) */ -#define XCHAL_DROM0_VADDR XCHAL_DATAROM0_VADDR /* (DEPRECATED) */ -#define XCHAL_DROM0_PADDR XCHAL_DATAROM0_PADDR /* (DEPRECATED) */ -#define XCHAL_DROM0_SIZE XCHAL_DATAROM0_SIZE /* (DEPRECATED) */ -#define XCHAL_DROM1_VADDR XCHAL_DATAROM1_VADDR /* (DEPRECATED) */ -#define XCHAL_DROM1_PADDR XCHAL_DATAROM1_PADDR /* (DEPRECATED) */ -#define XCHAL_DROM1_SIZE XCHAL_DATAROM1_SIZE /* (DEPRECATED) */ -#define XCHAL_DRAM0_VADDR XCHAL_DATARAM0_VADDR /* (DEPRECATED) */ -#define XCHAL_DRAM0_PADDR XCHAL_DATARAM0_PADDR /* (DEPRECATED) */ -#define XCHAL_DRAM0_SIZE XCHAL_DATARAM0_SIZE /* (DEPRECATED) */ -#define XCHAL_DRAM1_VADDR XCHAL_DATARAM1_VADDR /* (DEPRECATED) */ -#define XCHAL_DRAM1_PADDR XCHAL_DATARAM1_PADDR /* (DEPRECATED) */ -#define XCHAL_DRAM1_SIZE XCHAL_DATARAM1_SIZE /* (DEPRECATED) */ - - - -/*---------------------------------------------------------------------- - CACHE - ----------------------------------------------------------------------*/ - - -/* Default PREFCTL value to enable prefetch. 
*/ -#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0 -#define XCHAL_CACHE_PREFCTL_DEFAULT 0x00044 /* enabled, not aggressive */ -#elif XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RF_2014_0 -#define XCHAL_CACHE_PREFCTL_DEFAULT 0x01044 /* + enable prefetch to L1 */ -#elif ((XCHAL_PREFETCH_ENTRIES >= 16) && XCHAL_HAVE_CACHE_BLOCKOPS) -#define XCHAL_CACHE_PREFCTL_DEFAULT 0x81044 /* 12 entries for block ops */ -#elif ((XCHAL_PREFETCH_ENTRIES >= 8) && XCHAL_HAVE_CACHE_BLOCKOPS) -#define XCHAL_CACHE_PREFCTL_DEFAULT 0x51044 /* 5 entries for block ops */ -#else -#define XCHAL_CACHE_PREFCTL_DEFAULT 0x01044 /* 0 entries for block ops */ -#endif - - -/* Max for both I-cache and D-cache (used for general alignment): */ -#if XCHAL_ICACHE_LINESIZE > XCHAL_DCACHE_LINESIZE -# define XCHAL_CACHE_LINEWIDTH_MAX XCHAL_ICACHE_LINEWIDTH -# define XCHAL_CACHE_LINESIZE_MAX XCHAL_ICACHE_LINESIZE -#else -# define XCHAL_CACHE_LINEWIDTH_MAX XCHAL_DCACHE_LINEWIDTH -# define XCHAL_CACHE_LINESIZE_MAX XCHAL_DCACHE_LINESIZE -#endif - -#define XCHAL_ICACHE_SETSIZE (1< XCHAL_DCACHE_SETWIDTH -# define XCHAL_CACHE_SETWIDTH_MAX XCHAL_ICACHE_SETWIDTH -# define XCHAL_CACHE_SETSIZE_MAX XCHAL_ICACHE_SETSIZE -#else -# define XCHAL_CACHE_SETWIDTH_MAX XCHAL_DCACHE_SETWIDTH -# define XCHAL_CACHE_SETSIZE_MAX XCHAL_DCACHE_SETSIZE -#endif - -/* Instruction cache tag bits: */ -#define XCHAL_ICACHE_TAG_V_SHIFT 0 -#define XCHAL_ICACHE_TAG_V 0x1 /* valid bit */ -#if XCHAL_ICACHE_WAYS > 1 -# define XCHAL_ICACHE_TAG_F_SHIFT 1 -# define XCHAL_ICACHE_TAG_F 0x2 /* fill (LRU) bit */ -#else -# define XCHAL_ICACHE_TAG_F_SHIFT 0 -# define XCHAL_ICACHE_TAG_F 0 /* no fill (LRU) bit */ -#endif -#if XCHAL_ICACHE_LINE_LOCKABLE -# define XCHAL_ICACHE_TAG_L_SHIFT (XCHAL_ICACHE_TAG_F_SHIFT+1) -# define XCHAL_ICACHE_TAG_L (1 << XCHAL_ICACHE_TAG_L_SHIFT) /* lock bit */ -#else -# define XCHAL_ICACHE_TAG_L_SHIFT XCHAL_ICACHE_TAG_F_SHIFT -# define XCHAL_ICACHE_TAG_L 0 /* no lock bit */ -#endif -/* Data cache tag bits: */ -#define XCHAL_DCACHE_TAG_V_SHIFT 0 -#define XCHAL_DCACHE_TAG_V 0x1 /* valid bit */ -#if XCHAL_DCACHE_WAYS > 1 -# define XCHAL_DCACHE_TAG_F_SHIFT 1 -# define XCHAL_DCACHE_TAG_F 0x2 /* fill (LRU) bit */ -#else -# define XCHAL_DCACHE_TAG_F_SHIFT 0 -# define XCHAL_DCACHE_TAG_F 0 /* no fill (LRU) bit */ -#endif -#if XCHAL_DCACHE_IS_WRITEBACK -# define XCHAL_DCACHE_TAG_D_SHIFT (XCHAL_DCACHE_TAG_F_SHIFT+1) -# define XCHAL_DCACHE_TAG_D (1 << XCHAL_DCACHE_TAG_D_SHIFT) /* dirty bit */ -#else -# define XCHAL_DCACHE_TAG_D_SHIFT XCHAL_DCACHE_TAG_F_SHIFT -# define XCHAL_DCACHE_TAG_D 0 /* no dirty bit */ -#endif -#if XCHAL_DCACHE_LINE_LOCKABLE -# define XCHAL_DCACHE_TAG_L_SHIFT (XCHAL_DCACHE_TAG_D_SHIFT+1) -# define XCHAL_DCACHE_TAG_L (1 << XCHAL_DCACHE_TAG_L_SHIFT) /* lock bit */ -#else -# define XCHAL_DCACHE_TAG_L_SHIFT XCHAL_DCACHE_TAG_D_SHIFT -# define XCHAL_DCACHE_TAG_L 0 /* no lock bit */ -#endif - -/* Whether MEMCTL register has anything useful */ -#define XCHAL_USE_MEMCTL (((XCHAL_LOOP_BUFFER_SIZE > 0) || \ - XCHAL_DCACHE_IS_COHERENT || \ - XCHAL_HAVE_ICACHE_DYN_WAYS || \ - XCHAL_HAVE_DCACHE_DYN_WAYS) && \ - (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0)) - -#if XCHAL_DCACHE_IS_COHERENT -#define _MEMCTL_SNOOP_EN 0x02 /* Enable snoop */ -#else -#define _MEMCTL_SNOOP_EN 0x00 /* Don't enable snoop */ -#endif - -#if (XCHAL_LOOP_BUFFER_SIZE == 0) || XCHAL_ERRATUM_453 -#define _MEMCTL_L0IBUF_EN 0x00 /* No loop buffer or don't enable */ -#else -#define _MEMCTL_L0IBUF_EN 0x01 /* Enable loop buffer */ -#endif - -/* Default MEMCTL values: */ -#if 
XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS -#define XCHAL_CACHE_MEMCTL_DEFAULT (0xFFFFFF00 | _MEMCTL_L0IBUF_EN) -#else -#define XCHAL_CACHE_MEMCTL_DEFAULT (0x00000000 | _MEMCTL_L0IBUF_EN) -#endif - -#define XCHAL_SNOOP_LB_MEMCTL_DEFAULT (_MEMCTL_SNOOP_EN | _MEMCTL_L0IBUF_EN) - - -/*---------------------------------------------------------------------- - MMU - ----------------------------------------------------------------------*/ - -/* See for more details. */ - -/* Has different semantic in open source headers (where it means HAVE_PTP_MMU), - so comment out starting with RB-2008.3 release; later, might get - get reintroduced as a synonym for XCHAL_HAVE_PTP_MMU instead: */ -/*#define XCHAL_HAVE_MMU XCHAL_HAVE_TLBS*/ /* (DEPRECATED; use XCHAL_HAVE_TLBS instead) */ - -/* Indexing macros: */ -#define _XCHAL_ITLB_SET(n,_what) XCHAL_ITLB_SET ## n ## _what -#define XCHAL_ITLB_SET(n,what) _XCHAL_ITLB_SET(n, _ ## what ) -#define _XCHAL_ITLB_SET_E(n,i,_what) XCHAL_ITLB_SET ## n ## _E ## i ## _what -#define XCHAL_ITLB_SET_E(n,i,what) _XCHAL_ITLB_SET_E(n,i, _ ## what ) -#define _XCHAL_DTLB_SET(n,_what) XCHAL_DTLB_SET ## n ## _what -#define XCHAL_DTLB_SET(n,what) _XCHAL_DTLB_SET(n, _ ## what ) -#define _XCHAL_DTLB_SET_E(n,i,_what) XCHAL_DTLB_SET ## n ## _E ## i ## _what -#define XCHAL_DTLB_SET_E(n,i,what) _XCHAL_DTLB_SET_E(n,i, _ ## what ) -/* - * Example use: XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES) - * to get the value of XCHAL_ITLB_SET_ENTRIES where is the first auto-refill set. - */ - -/* Number of entries per autorefill way: */ -#define XCHAL_ITLB_ARF_ENTRIES (1< 0 && XCHAL_DTLB_ARF_WAYS > 0 && XCHAL_MMU_RINGS >= 2 -# define XCHAL_HAVE_PTP_MMU 1 /* have full MMU (with page table [autorefill] and protection) */ -#else -# define XCHAL_HAVE_PTP_MMU 0 /* don't have full MMU */ -#endif -#endif - -/* - * For full MMUs, report kernel RAM segment and kernel I/O segment static page mappings: - */ -#if XCHAL_HAVE_PTP_MMU && !XCHAL_HAVE_SPANNING_WAY -#define XCHAL_KSEG_CACHED_VADDR 0xD0000000 /* virt.addr of kernel RAM cached static map */ -#define XCHAL_KSEG_CACHED_PADDR 0x00000000 /* phys.addr of kseg_cached */ -#define XCHAL_KSEG_CACHED_SIZE 0x08000000 /* size in bytes of kseg_cached (assumed power of 2!!!) */ -#define XCHAL_KSEG_BYPASS_VADDR 0xD8000000 /* virt.addr of kernel RAM bypass (uncached) static map */ -#define XCHAL_KSEG_BYPASS_PADDR 0x00000000 /* phys.addr of kseg_bypass */ -#define XCHAL_KSEG_BYPASS_SIZE 0x08000000 /* size in bytes of kseg_bypass (assumed power of 2!!!) */ - -#define XCHAL_KIO_CACHED_VADDR 0xE0000000 /* virt.addr of kernel I/O cached static map */ -#define XCHAL_KIO_CACHED_PADDR 0xF0000000 /* phys.addr of kio_cached */ -#define XCHAL_KIO_CACHED_SIZE 0x10000000 /* size in bytes of kio_cached (assumed power of 2!!!) */ -#define XCHAL_KIO_BYPASS_VADDR 0xF0000000 /* virt.addr of kernel I/O bypass (uncached) static map */ -#define XCHAL_KIO_BYPASS_PADDR 0xF0000000 /* phys.addr of kio_bypass */ -#define XCHAL_KIO_BYPASS_SIZE 0x10000000 /* size in bytes of kio_bypass (assumed power of 2!!!) */ - -#define XCHAL_SEG_MAPPABLE_VADDR 0x00000000 /* start of largest non-static-mapped virtual addr area */ -#define XCHAL_SEG_MAPPABLE_SIZE 0xD0000000 /* size in bytes of " */ -/* define XCHAL_SEG_MAPPABLE2_xxx if more areas present, sorted in order of descending size. 
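 *
 * Editor's sketch (not part of the original header): with this fixed map,
 * the same physical RAM byte is reachable through either static window:
 *
 *	cached   vaddr = XCHAL_KSEG_CACHED_VADDR + (paddr - XCHAL_KSEG_CACHED_PADDR)
 *	uncached vaddr = XCHAL_KSEG_BYPASS_VADDR + (paddr - XCHAL_KSEG_BYPASS_PADDR)
 *
 * valid only for paddr inside the 0x08000000-byte kseg window above.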
*/ -#endif - - -/*---------------------------------------------------------------------- - MISC - ----------------------------------------------------------------------*/ - -/* Data alignment required if used for instructions: */ -#if XCHAL_INST_FETCH_WIDTH > XCHAL_DATA_WIDTH -# define XCHAL_ALIGN_MAX XCHAL_INST_FETCH_WIDTH -#else -# define XCHAL_ALIGN_MAX XCHAL_DATA_WIDTH -#endif - -/* - * Names kept for backward compatibility. - * (Here "RELEASE" is now a misnomer; these are product *versions*, not the releases - * under which they are released. In the T10##.# era there was no distinction.) - */ -#define XCHAL_HW_RELEASE_MAJOR XCHAL_HW_VERSION_MAJOR -#define XCHAL_HW_RELEASE_MINOR XCHAL_HW_VERSION_MINOR -#define XCHAL_HW_RELEASE_NAME XCHAL_HW_VERSION_NAME - - - - -/*---------------------------------------------------------------------- - COPROCESSORS and EXTRA STATE - ----------------------------------------------------------------------*/ - -#define XCHAL_EXTRA_SA_SIZE XCHAL_NCP_SA_SIZE -#define XCHAL_EXTRA_SA_ALIGN XCHAL_NCP_SA_ALIGN -#define XCHAL_CPEXTRA_SA_SIZE XCHAL_TOTAL_SA_SIZE -#define XCHAL_CPEXTRA_SA_ALIGN XCHAL_TOTAL_SA_ALIGN - -#if defined (_ASMLANGUAGE) || defined (__ASSEMBLER__) - - /* Invoked at start of save area load/store sequence macro to setup macro - * internal offsets. Not usually invoked directly. - * continue 0 for 1st sequence, 1 for subsequent consecutive ones. - * totofs offset from original ptr to next load/store location. - */ - .macro xchal_sa_start continue totofs - .ifeq \continue - .set .Lxchal_pofs_, 0 /* offset from original ptr to current \ptr */ - .set .Lxchal_ofs_, 0 /* offset from current \ptr to next load/store location */ - .endif - .if \totofs + 1 /* if totofs specified (not -1) */ - .set .Lxchal_ofs_, \totofs - .Lxchal_pofs_ /* specific offset from original ptr */ - .endif - .endm - - /* Align portion of save area and bring ptr in range if necessary. - * Used by save area load/store sequences. Not usually invoked directly. - * Allows combining multiple (sub-)sequences arbitrarily. - * ptr pointer to save area (may be off, see .Lxchal_pofs_) - * minofs,maxofs range of offset from cur ptr to next load/store loc; - * minofs <= 0 <= maxofs (0 must always be valid offset) - * range must be within +/- 30kB or so. - * ofsalign alignment granularity of minofs .. maxofs (pow of 2) - * (restriction on offset from ptr to next load/store loc) - * totalign align from orig ptr to next load/store loc (pow of 2) - */ - .macro xchal_sa_align ptr minofs maxofs ofsalign totalign - /* First align where we start accessing the next register - * per \totalign relative to original ptr (i.e. start of the save area): - */ - .set .Lxchal_ofs_, ((.Lxchal_pofs_ + .Lxchal_ofs_ + \totalign - 1) & -\totalign) - .Lxchal_pofs_ - /* If necessary, adjust \ptr to bring .Lxchal_ofs_ in acceptable range: */ - .if (((\maxofs) - .Lxchal_ofs_) & 0xC0000000) | ((.Lxchal_ofs_ - (\minofs)) & 0xC0000000) | (.Lxchal_ofs_ & (\ofsalign-1)) - .set .Ligmask, 0xFFFFFFFF /* TODO: optimize to addmi, per aligns and .Lxchal_ofs_ */ - addi \ptr, \ptr, (.Lxchal_ofs_ & .Ligmask) - .set .Lxchal_pofs_, .Lxchal_pofs_ + (.Lxchal_ofs_ & .Ligmask) - .set .Lxchal_ofs_, (.Lxchal_ofs_ & ~.Ligmask) - .endif - .endm - /* - * We could optimize for addi to expand to only addmi instead of - * "addmi;addi", where possible. 
Here's a partial example how: - * .set .Lmaxmask, -(\ofsalign) & -(\totalign) - * .if (((\maxofs) + ~.Lmaxmask + 1) & 0xFFFFFF00) && ((.Lxchal_ofs_ & ~.Lmaxmask) == 0) - * .set .Ligmask, 0xFFFFFF00 - * .elif ... ditto for negative ofs range ... - * .set .Ligmask, 0xFFFFFF00 - * .set ... adjust per offset ... - * .else - * .set .Ligmask, 0xFFFFFFFF - * .endif - */ - - /* Invoke this after xchal_XXX_{load,store} macros to restore \ptr. */ - .macro xchal_sa_ptr_restore ptr - .if .Lxchal_pofs_ - addi \ptr, \ptr, - .Lxchal_pofs_ - .set .Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ - .set .Lxchal_pofs_, 0 - .endif - .endm - - /* - * Use as eg: - * xchal_atmps_store a1, SOMEOFS, XCHAL_SA_NUM_ATMPS, a4, a5 - * xchal_ncp_load a2, a0,a3,a4,a5 - * xchal_atmps_load a1, SOMEOFS, XCHAL_SA_NUM_ATMPS, a4, a5 - * - * Specify only the ARs you *haven't* saved/restored already, up to 4. - * They *must* be the *last* ARs (in same order) specified to save area - * load/store sequences. In the example above, a0 and a3 were already - * saved/restored and unused (thus available) but a4 and a5 were not. - */ -#define xchal_atmps_store xchal_atmps_loadstore s32i, -#define xchal_atmps_load xchal_atmps_loadstore l32i, - .macro xchal_atmps_loadstore inst ptr offset nreq aa=0 ab=0 ac=0 ad=0 - .set .Lnsaved_, 0 - .irp reg,\aa,\ab,\ac,\ad - .ifeq 0x\reg ; .set .Lnsaved_,.Lnsaved_+1 ; .endif - .endr - .set .Laofs_, 0 - .irp reg,\aa,\ab,\ac,\ad - .ifgt (\nreq)-.Lnsaved_ - \inst \reg, \ptr, .Laofs_+\offset - .set .Laofs_,.Laofs_+4 - .set .Lnsaved_,.Lnsaved_+1 - .endif - .endr - .endm - -/*#define xchal_ncp_load_a2 xchal_ncp_load a2,a3,a4,a5,a6*/ -/*#define xchal_ncp_store_a2 xchal_ncp_store a2,a3,a4,a5,a6*/ -#define xchal_extratie_load xchal_ncptie_load -#define xchal_extratie_store xchal_ncptie_store -#define xchal_extratie_load_a2 xchal_ncptie_load a2,a3,a4,a5,a6 -#define xchal_extratie_store_a2 xchal_ncptie_store a2,a3,a4,a5,a6 -#define xchal_extra_load xchal_ncp_load -#define xchal_extra_store xchal_ncp_store -#define xchal_extra_load_a2 xchal_ncp_load a2,a3,a4,a5,a6 -#define xchal_extra_store_a2 xchal_ncp_store a2,a3,a4,a5,a6 -#define xchal_extra_load_funcbody xchal_ncp_load a2,a3,a4,a5,a6 -#define xchal_extra_store_funcbody xchal_ncp_store a2,a3,a4,a5,a6 -#define xchal_cp0_store_a2 xchal_cp0_store a2,a3,a4,a5,a6 -#define xchal_cp0_load_a2 xchal_cp0_load a2,a3,a4,a5,a6 -#define xchal_cp1_store_a2 xchal_cp1_store a2,a3,a4,a5,a6 -#define xchal_cp1_load_a2 xchal_cp1_load a2,a3,a4,a5,a6 -#define xchal_cp2_store_a2 xchal_cp2_store a2,a3,a4,a5,a6 -#define xchal_cp2_load_a2 xchal_cp2_load a2,a3,a4,a5,a6 -#define xchal_cp3_store_a2 xchal_cp3_store a2,a3,a4,a5,a6 -#define xchal_cp3_load_a2 xchal_cp3_load a2,a3,a4,a5,a6 -#define xchal_cp4_store_a2 xchal_cp4_store a2,a3,a4,a5,a6 -#define xchal_cp4_load_a2 xchal_cp4_load a2,a3,a4,a5,a6 -#define xchal_cp5_store_a2 xchal_cp5_store a2,a3,a4,a5,a6 -#define xchal_cp5_load_a2 xchal_cp5_load a2,a3,a4,a5,a6 -#define xchal_cp6_store_a2 xchal_cp6_store a2,a3,a4,a5,a6 -#define xchal_cp6_load_a2 xchal_cp6_load a2,a3,a4,a5,a6 -#define xchal_cp7_store_a2 xchal_cp7_store a2,a3,a4,a5,a6 -#define xchal_cp7_load_a2 xchal_cp7_load a2,a3,a4,a5,a6 - -/* Empty placeholder macros for undefined coprocessors: */ -#if (XCHAL_CP_MASK & ~XCHAL_CP_PORT_MASK) == 0 -# if XCHAL_CP0_SA_SIZE == 0 - .macro xchal_cp0_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp0_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP1_SA_SIZE == 0 - .macro xchal_cp1_store p a b c d continue=0 
ofs=-1 select=-1 ; .endm - .macro xchal_cp1_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP2_SA_SIZE == 0 - .macro xchal_cp2_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp2_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP3_SA_SIZE == 0 - .macro xchal_cp3_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp3_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP4_SA_SIZE == 0 - .macro xchal_cp4_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp4_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP5_SA_SIZE == 0 - .macro xchal_cp5_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp5_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP6_SA_SIZE == 0 - .macro xchal_cp6_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp6_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -# if XCHAL_CP7_SA_SIZE == 0 - .macro xchal_cp7_store p a b c d continue=0 ofs=-1 select=-1 ; .endm - .macro xchal_cp7_load p a b c d continue=0 ofs=-1 select=-1 ; .endm -# endif -#endif - - /******************** - * Macros to create functions that save and restore the state of *any* TIE - * coprocessor (by dynamic index). - */ - - /* - * Macro that expands to the body of a function - * that stores the selected coprocessor's state (registers etc). - * Entry: a2 = ptr to save area in which to save cp state - * a3 = coprocessor number - * Exit: any register a2-a15 (?) may have been clobbered. - */ - .macro xchal_cpi_store_funcbody -#if (XCHAL_CP_MASK & ~XCHAL_CP_PORT_MASK) -# if XCHAL_CP0_SA_SIZE - bnez a3, 99f - xchal_cp0_store_a2 - j 90f -99: -# endif -# if XCHAL_CP1_SA_SIZE - bnei a3, 1, 99f - xchal_cp1_store_a2 - j 90f -99: -# endif -# if XCHAL_CP2_SA_SIZE - bnei a3, 2, 99f - xchal_cp2_store_a2 - j 90f -99: -# endif -# if XCHAL_CP3_SA_SIZE - bnei a3, 3, 99f - xchal_cp3_store_a2 - j 90f -99: -# endif -# if XCHAL_CP4_SA_SIZE - bnei a3, 4, 99f - xchal_cp4_store_a2 - j 90f -99: -# endif -# if XCHAL_CP5_SA_SIZE - bnei a3, 5, 99f - xchal_cp5_store_a2 - j 90f -99: -# endif -# if XCHAL_CP6_SA_SIZE - bnei a3, 6, 99f - xchal_cp6_store_a2 - j 90f -99: -# endif -# if XCHAL_CP7_SA_SIZE - bnei a3, 7, 99f - xchal_cp7_store_a2 - j 90f -99: -# endif -90: -#endif - .endm - - /* - * Macro that expands to the body of a function - * that loads the selected coprocessor's state (registers etc). - * Entry: a2 = ptr to save area from which to restore cp state - * a3 = coprocessor number - * Exit: any register a2-a15 (?) may have been clobbered. 
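 *
 * Editor's sketch (not part of the original header; the function name and
 * the abi_entry/abi_return helpers are assumptions): these *_funcbody
 * macros are meant to be wrapped in a small assembly function, e.g.
 *
 *	.text
 *	.align	4
 *	.global	my_cp_restore		// my_cp_restore(void *save_area, int cpnum)
 *	my_cp_restore:
 *		abi_entry
 *		xchal_cpi_load_funcbody	// a2 = save area, a3 = coprocessor index
 *		abi_return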
- */ - .macro xchal_cpi_load_funcbody -#if (XCHAL_CP_MASK & ~XCHAL_CP_PORT_MASK) -# if XCHAL_CP0_SA_SIZE - bnez a3, 99f - xchal_cp0_load_a2 - j 90f -99: -# endif -# if XCHAL_CP1_SA_SIZE - bnei a3, 1, 99f - xchal_cp1_load_a2 - j 90f -99: -# endif -# if XCHAL_CP2_SA_SIZE - bnei a3, 2, 99f - xchal_cp2_load_a2 - j 90f -99: -# endif -# if XCHAL_CP3_SA_SIZE - bnei a3, 3, 99f - xchal_cp3_load_a2 - j 90f -99: -# endif -# if XCHAL_CP4_SA_SIZE - bnei a3, 4, 99f - xchal_cp4_load_a2 - j 90f -99: -# endif -# if XCHAL_CP5_SA_SIZE - bnei a3, 5, 99f - xchal_cp5_load_a2 - j 90f -99: -# endif -# if XCHAL_CP6_SA_SIZE - bnei a3, 6, 99f - xchal_cp6_load_a2 - j 90f -99: -# endif -# if XCHAL_CP7_SA_SIZE - bnei a3, 7, 99f - xchal_cp7_load_a2 - j 90f -99: -# endif -90: -#endif - .endm - -#endif /*_ASMLANGUAGE or __ASSEMBLER__*/ - - -/* Other default macros for undefined coprocessors: */ -#ifndef XCHAL_CP0_NAME -# define XCHAL_CP0_NAME 0 -# define XCHAL_CP0_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP0_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP1_NAME -# define XCHAL_CP1_NAME 0 -# define XCHAL_CP1_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP1_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP2_NAME -# define XCHAL_CP2_NAME 0 -# define XCHAL_CP2_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP2_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP3_NAME -# define XCHAL_CP3_NAME 0 -# define XCHAL_CP3_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP3_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP4_NAME -# define XCHAL_CP4_NAME 0 -# define XCHAL_CP4_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP4_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP5_NAME -# define XCHAL_CP5_NAME 0 -# define XCHAL_CP5_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP5_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP6_NAME -# define XCHAL_CP6_NAME 0 -# define XCHAL_CP6_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP6_SA_CONTENTS_LIBDB /* empty */ -#endif -#ifndef XCHAL_CP7_NAME -# define XCHAL_CP7_NAME 0 -# define XCHAL_CP7_SA_CONTENTS_LIBDB_NUM 0 -# define XCHAL_CP7_SA_CONTENTS_LIBDB /* empty */ -#endif - -#if XCHAL_CP_MASK == 0 -/* Filler info for unassigned coprocessors, to simplify arrays etc: */ -#define XCHAL_CP0_SA_SIZE 0 -#define XCHAL_CP0_SA_ALIGN 1 -#define XCHAL_CP1_SA_SIZE 0 -#define XCHAL_CP1_SA_ALIGN 1 -#define XCHAL_CP2_SA_SIZE 0 -#define XCHAL_CP2_SA_ALIGN 1 -#define XCHAL_CP3_SA_SIZE 0 -#define XCHAL_CP3_SA_ALIGN 1 -#define XCHAL_CP4_SA_SIZE 0 -#define XCHAL_CP4_SA_ALIGN 1 -#define XCHAL_CP5_SA_SIZE 0 -#define XCHAL_CP5_SA_ALIGN 1 -#define XCHAL_CP6_SA_SIZE 0 -#define XCHAL_CP6_SA_ALIGN 1 -#define XCHAL_CP7_SA_SIZE 0 -#define XCHAL_CP7_SA_ALIGN 1 -#endif - - -/* Indexing macros: */ -#define _XCHAL_CP_SA_SIZE(n) XCHAL_CP ## n ## _SA_SIZE -#define XCHAL_CP_SA_SIZE(n) _XCHAL_CP_SA_SIZE(n) /* n = 0 .. 7 */ -#define _XCHAL_CP_SA_ALIGN(n) XCHAL_CP ## n ## _SA_ALIGN -#define XCHAL_CP_SA_ALIGN(n) _XCHAL_CP_SA_ALIGN(n) /* n = 0 .. 
7 */ - -#define XCHAL_CPEXTRA_SA_SIZE_TOR2 XCHAL_CPEXTRA_SA_SIZE /* Tor2Beta only - do not use */ - -/* Link-time HAL global variables that report coprocessor numbers by name - (names are case-preserved from the original TIE): */ -#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) -# define _XCJOIN(a,b) a ## b -# define XCJOIN(a,b) _XCJOIN(a,b) -# ifdef XCHAL_CP0_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP0_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP0_IDENT); -# endif -# ifdef XCHAL_CP1_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP1_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP1_IDENT); -# endif -# ifdef XCHAL_CP2_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP2_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP2_IDENT); -# endif -# ifdef XCHAL_CP3_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP3_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP3_IDENT); -# endif -# ifdef XCHAL_CP4_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP4_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP4_IDENT); -# endif -# ifdef XCHAL_CP5_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP5_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP5_IDENT); -# endif -# ifdef XCHAL_CP6_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP6_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP6_IDENT); -# endif -# ifdef XCHAL_CP7_NAME -extern const unsigned char XCJOIN(Xthal_cp_id_,XCHAL_CP7_IDENT); -extern const unsigned int XCJOIN(Xthal_cp_mask_,XCHAL_CP7_IDENT); -# endif -#endif - - - - -/*---------------------------------------------------------------------- - DERIVED - ----------------------------------------------------------------------*/ - -#if XCHAL_HAVE_BE -#define XCHAL_INST_ILLN 0xD60F /* 2-byte illegal instruction, msb-first */ -#define XCHAL_INST_ILLN_BYTE0 0xD6 /* 2-byte illegal instruction, 1st byte */ -#define XCHAL_INST_ILLN_BYTE1 0x0F /* 2-byte illegal instruction, 2nd byte */ -#else -#define XCHAL_INST_ILLN 0xF06D /* 2-byte illegal instruction, lsb-first */ -#define XCHAL_INST_ILLN_BYTE0 0x6D /* 2-byte illegal instruction, 1st byte */ -#define XCHAL_INST_ILLN_BYTE1 0xF0 /* 2-byte illegal instruction, 2nd byte */ -#endif -/* Belongs in xtensa/hal.h: */ -#define XTHAL_INST_ILL 0x000000 /* 3-byte illegal instruction */ - - -/* - * Because information as to exactly which hardware version is targeted - * by a given software build is not always available, compile-time HAL - * Hardware-Release "_AT" macros are fuzzy (return 0, 1, or XCHAL_MAYBE): - * (Here "RELEASE" is now a misnomer; these are product *versions*, not the releases - * under which they are released. In the T10##.# era there was no distinction.) - */ -#if XCHAL_HW_CONFIGID_RELIABLE -# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) (XTHAL_REL_LE( XCHAL_HW_VERSION_MAJOR,XCHAL_HW_VERSION_MINOR, major,minor ) ? 1 : 0) -# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) (XTHAL_REL_GE( XCHAL_HW_VERSION_MAJOR,XCHAL_HW_VERSION_MINOR, major,minor ) ? 1 : 0) -# define XCHAL_HW_RELEASE_AT(major,minor) (XTHAL_REL_EQ( XCHAL_HW_VERSION_MAJOR,XCHAL_HW_VERSION_MINOR, major,minor ) ? 1 : 0) -# define XCHAL_HW_RELEASE_MAJOR_AT(major) ((XCHAL_HW_VERSION_MAJOR == (major)) ? 1 : 0) -#else -# define XCHAL_HW_RELEASE_AT_OR_BELOW(major,minor) ( ((major) < 1040 && XCHAL_HAVE_XEA2) ? 0 \ - : ((major) > 1050 && XCHAL_HAVE_XEA1) ? 
1 \ - : XTHAL_MAYBE ) -# define XCHAL_HW_RELEASE_AT_OR_ABOVE(major,minor) ( ((major) >= 2000 && XCHAL_HAVE_XEA1) ? 0 \ - : (XTHAL_REL_LE(major,minor, 1040,0) && XCHAL_HAVE_XEA2) ? 1 \ - : XTHAL_MAYBE ) -# define XCHAL_HW_RELEASE_AT(major,minor) ( (((major) < 1040 && XCHAL_HAVE_XEA2) || \ - ((major) >= 2000 && XCHAL_HAVE_XEA1)) ? 0 : XTHAL_MAYBE) -# define XCHAL_HW_RELEASE_MAJOR_AT(major) XCHAL_HW_RELEASE_AT(major,0) -#endif - - -#endif /*XTENSA_CONFIG_CORE_H*/ - diff --git a/src/arch/xtensa/include/xtensa/core-macros.h b/src/arch/xtensa/include/xtensa/core-macros.h deleted file mode 100644 index 8b8a16b49a30..000000000000 --- a/src/arch/xtensa/include/xtensa/core-macros.h +++ /dev/null @@ -1,506 +0,0 @@ -/* - * xtensa/core-macros.h -- C specific definitions - * that depend on CORE configuration - */ - -/* - * Copyright (c) 2012 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTENSA_CACHE_H -#define XTENSA_CACHE_H - -#include - -/* Only define things for C code. */ -#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) - - - -/*************************** CACHE ***************************/ - -/* All the macros are in the lower case now and some of them - * share the name with the existing functions from hal.h. - * Including this header file will define XTHAL_USE_CACHE_MACROS - * which directs hal.h not to use the functions. - */ - -/* - * Single-cache-line operations in C-callable inline assembly. - * Essentially macro versions (uppercase) of: - * - * xthal_icache_line_invalidate(void *addr); - * xthal_icache_line_lock(void *addr); - * xthal_icache_line_unlock(void *addr); - * xthal_icache_sync(void); - * - * NOTE: unlike the above functions, the following macros do NOT - * execute the xthal_icache_sync() as part of each line operation. - * This sync must be called explicitly by the caller. This is to - * allow better optimization when operating on more than one line. - * - * xthal_dcache_line_invalidate(void *addr); - * xthal_dcache_line_writeback(void *addr); - * xthal_dcache_line_writeback_inv(void *addr); - * xthal_dcache_line_lock(void *addr); - * xthal_dcache_line_unlock(void *addr); - * xthal_dcache_sync(void); - * xthal_dcache_line_prefetch_for_write(void *addr); - * xthal_dcache_line_prefetch_for_read(void *addr); - * - * All are made memory-barriers, given that's how they're typically used - * (ops operate on a whole line, so clobbers all memory not just *addr). 
- * - * NOTE: All the block block cache ops and line prefetches are implemented - * using intrinsics so they are better optimized regarding memory barriers etc. - * - * All block downgrade functions exist in two forms: with and without - * the 'max' parameter: This parameter allows compiler to optimize - * the functions whenever the parameter is smaller than the cache size. - * - * xthal_dcache_block_invalidate(void *addr, unsigned size); - * xthal_dcache_block_writeback(void *addr, unsigned size); - * xthal_dcache_block_writeback_inv(void *addr, unsigned size); - * xthal_dcache_block_invalidate_max(void *addr, unsigned size, unsigned max); - * xthal_dcache_block_writeback_max(void *addr, unsigned size, unsigned max); - * xthal_dcache_block_writeback_inv_max(void *addr, unsigned size, unsigned max); - * - * xthal_dcache_block_prefetch_for_read(void *addr, unsigned size); - * xthal_dcache_block_prefetch_for_write(void *addr, unsigned size); - * xthal_dcache_block_prefetch_modify(void *addr, unsigned size); - * xthal_dcache_block_prefetch_read_write(void *addr, unsigned size); - * xthal_dcache_block_prefetch_for_read_grp(void *addr, unsigned size); - * xthal_dcache_block_prefetch_for_write_grp(void *addr, unsigned size); - * xthal_dcache_block_prefetch_modify_grp(void *addr, unsigned size); - * xthal_dcache_block_prefetch_read_write_grp(void *addr, unsigned size) - * - * xthal_dcache_block_wait(); - * xthal_dcache_block_required_wait(); - * xthal_dcache_block_abort(); - * xthal_dcache_block_prefetch_end(); - * xthal_dcache_block_newgrp(); - */ - -/*** INSTRUCTION CACHE ***/ - -#define XTHAL_USE_CACHE_MACROS - -#if XCHAL_ICACHE_SIZE > 0 -# define xthal_icache_line_invalidate(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("ihi %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -#else -# define xthal_icache_line_invalidate(addr) do {/*nothing*/} while(0) -#endif - -#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE -# define xthal_icache_line_lock(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("ipfl %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -# define xthal_icache_line_unlock(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("ihu %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -#else -# define xthal_icache_line_lock(addr) do {/*nothing*/} while(0) -# define xthal_icache_line_unlock(addr) do {/*nothing*/} while(0) -#endif - -/* - * Even if a config doesn't have caches, an isync is still needed - * when instructions in any memory are modified, whether by a loader - * or self-modifying code. Therefore, this macro always produces - * an isync, whether or not an icache is present. 
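 *
 * Editor's sketch (not part of the original header): since the line macros
 * above deliberately omit the sync, code that patches instructions is
 * expected to issue the sync itself, e.g.
 *
 *	xthal_dcache_line_writeback(addr);	// push new opcodes to memory
 *	xthal_icache_line_invalidate(addr);	// drop the stale icache line
 *	xthal_icache_sync();			// isync before executing the new code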
- */ -#define xthal_icache_sync() \ - __asm__ __volatile__("isync":::"memory") - - -/*** DATA CACHE ***/ - -#if XCHAL_DCACHE_SIZE > 0 - -# include - -# define xthal_dcache_line_invalidate(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("dhi %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -# define xthal_dcache_line_writeback(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("dhwb %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -# define xthal_dcache_line_writeback_inv(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("dhwbi %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -# define xthal_dcache_sync() \ - __asm__ __volatile__("" /*"dsync"?*/:::"memory") -# define xthal_dcache_line_prefetch_for_read(addr) do { \ - XT_DPFR((const int*)addr, 0); \ - } while(0) -#else -# define xthal_dcache_line_invalidate(addr) do {/*nothing*/} while(0) -# define xthal_dcache_line_writeback(addr) do {/*nothing*/} while(0) -# define xthal_dcache_line_writeback_inv(addr) do {/*nothing*/} while(0) -# define xthal_dcache_sync() __asm__ __volatile__("":::"memory") -# define xthal_dcache_line_prefetch_for_read(addr) do {/*nothing*/} while(0) -#endif - -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE -# define xthal_dcache_line_lock(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("dpfl %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -# define xthal_dcache_line_unlock(addr) do { void *__a = (void*)(addr); \ - __asm__ __volatile__("dhu %0, 0" :: "a"(__a) : "memory"); \ - } while(0) -#else -# define xthal_dcache_line_lock(addr) do {/*nothing*/} while(0) -# define xthal_dcache_line_unlock(addr) do {/*nothing*/} while(0) -#endif - -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK - -# define xthal_dcache_line_prefetch_for_write(addr) do { \ - XT_DPFW((const int*)addr, 0); \ - } while(0) -#else -# define xthal_dcache_line_prefetch_for_write(addr) do {/*nothing*/} while(0) -#endif - - -/***** Block Operations *****/ - -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS - -/* upgrades */ - -# define _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, type) \ - { \ - type((const int*)addr, size); \ - } - -/*downgrades */ - -# define _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type) \ - unsigned _s = size; \ - unsigned _a = (unsigned) addr; \ - do { \ - unsigned __s = (_s > XCHAL_DCACHE_SIZE) ? 
\ - XCHAL_DCACHE_SIZE : _s; \ - type((const int*)_a, __s); \ - _s -= __s; \ - _a += __s; \ - } while(_s > 0); - -# define _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, type, max) \ - if (max <= XCHAL_DCACHE_SIZE) { \ - unsigned _s = size; \ - unsigned _a = (unsigned) addr; \ - type((const int*)_a, _s); \ - } \ - else { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type); \ - } - -# define xthal_dcache_block_invalidate(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHI_B); \ - } while(0) -# define xthal_dcache_block_writeback(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWB_B); \ - } while(0) -# define xthal_dcache_block_writeback_inv(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWBI_B); \ - } while(0) - -# define xthal_dcache_block_invalidate_max(addr, size, max) do { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHI_B, max); \ - } while(0) -# define xthal_dcache_block_writeback_max(addr, size, max) do { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWB_B, max); \ - } while(0) -# define xthal_dcache_block_writeback_inv_max(addr, size, max) do { \ - _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWBI_B, max); \ - } while(0) - -/* upgrades that are performed even with write-thru caches */ - -# define xthal_dcache_block_prefetch_read_write(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \ - } while(0) -# define xthal_dcache_block_prefetch_read_write_grp(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \ - } while(0) -# define xthal_dcache_block_prefetch_for_read(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_B); \ - } while(0) -# define xthal_dcache_block_prefetch_for_read_grp(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_BF); \ - } while(0) - -/* abort all or end optional block cache operations */ -# define xthal_dcache_block_abort() do { \ - XT_PFEND_A(); \ - } while(0) -# define xthal_dcache_block_end() do { \ - XT_PFEND_O(); \ - } while(0) - -/* wait for all/required block cache operations to finish */ -# define xthal_dcache_block_wait() do { \ - XT_PFWAIT_A(); \ - } while(0) -# define xthal_dcache_block_required_wait() do { \ - XT_PFWAIT_R(); \ - } while(0) -/* Start a new group */ -# define xthal_dcache_block_newgrp() do { \ - XT_PFNXT_F(); \ - } while(0) -#else -# define xthal_dcache_block_invalidate(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_writeback(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_writeback_inv(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_invalidate_max(addr, size, max) do {/*nothing*/} while(0) -# define xthal_dcache_block_writeback_max(addr, size, max) do {/*nothing*/} while(0) -# define xthal_dcache_block_writeback_inv_max(addr, size, max) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_read_write(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_read_write_grp(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_for_read(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_for_read_grp(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_end() do {/*nothing*/} while(0) -# define xthal_dcache_block_abort() do {/*nothing*/} while(0) -# define xthal_dcache_block_wait() do {/*nothing*/} while(0) -# define xthal_dcache_block_required_wait() do {/*nothing*/} while(0) -# define xthal_dcache_block_newgrp() do {/*nothing*/} while(0) 
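/*
 * Editor's sketch (not part of the original header): the block prefetch
 * macros above queue background operations, so the usual pattern is to
 * start the prefetch early and wait before touching the data, e.g.
 *
 *	xthal_dcache_block_prefetch_for_read(buf, len);
 *	... other work ...
 *	xthal_dcache_block_wait();
 *	consume(buf, len);		// hypothetical consumer
 */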
-#endif - -#if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS && XCHAL_DCACHE_IS_WRITEBACK - -# define xthal_dcache_block_prefetch_for_write(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \ - } while(0) -# define xthal_dcache_block_prefetch_modify(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_B); \ - } while(0) -# define xthal_dcache_block_prefetch_for_write_grp(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \ - } while(0) -# define xthal_dcache_block_prefetch_modify_grp(addr, size) do { \ - _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_BF); \ - } while(0) -#else -# define xthal_dcache_block_prefetch_for_write(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_modify(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_for_write_grp(addr, size) do {/*nothing*/} while(0) -# define xthal_dcache_block_prefetch_modify_grp(addr, size) do {/*nothing*/} while(0) -#endif - -/*************************** INTERRUPTS ***************************/ - -/* - * Macro versions of: - * unsigned xthal_get_intenable( void ); - * void xthal_set_intenable( unsigned ); - * unsigned xthal_get_interrupt( void ); - * void xthal_set_intset( unsigned ); - * void xthal_set_intclear( unsigned ); - * unsigned xthal_get_ccount(void); - * void xthal_set_ccompare(int, unsigned); - * unsigned xthal_get_ccompare(int); - * - * NOTE: for {set,get}_ccompare, the first argument MUST be a decimal constant. - */ - -#if XCHAL_HAVE_INTERRUPTS -# define XTHAL_GET_INTENABLE() ({ int __intenable; \ - __asm__("rsr.intenable %0" : "=a"(__intenable)); \ - __intenable; }) -# define XTHAL_SET_INTENABLE(v) do { int __intenable = (int)(v); \ - __asm__ __volatile__("wsr.intenable %0" :: "a"(__intenable):"memory"); \ - } while(0) -# define XTHAL_GET_INTERRUPT() ({ int __interrupt; \ - __asm__("rsr.interrupt %0" : "=a"(__interrupt)); \ - __interrupt; }) -# define XTHAL_SET_INTSET(v) do { int __interrupt = (int)(v); \ - __asm__ __volatile__("wsr.intset %0" :: "a"(__interrupt):"memory"); \ - } while(0) -# define XTHAL_SET_INTCLEAR(v) do { int __interrupt = (int)(v); \ - __asm__ __volatile__("wsr.intclear %0" :: "a"(__interrupt):"memory"); \ - } while(0) -# define XTHAL_GET_CCOUNT() ({ int __ccount; \ - __asm__("rsr.ccount %0" : "=a"(__ccount)); \ - __ccount; }) -# define XTHAL_SET_CCOUNT(v) do { int __ccount = (int)(v); \ - __asm__ __volatile__("wsr.ccount %0" :: "a"(__ccount):"memory"); \ - } while(0) -# define _XTHAL_GET_CCOMPARE(n) ({ int __ccompare; \ - __asm__("rsr.ccompare" #n " %0" : "=a"(__ccompare)); \ - __ccompare; }) -# define XTHAL_GET_CCOMPARE(n) _XTHAL_GET_CCOMPARE(n) -# define _XTHAL_SET_CCOMPARE(n,v) do { int __ccompare = (int)(v); \ - __asm__ __volatile__("wsr.ccompare" #n " %0 ; esync" :: "a"(__ccompare):"memory"); \ - } while(0) -# define XTHAL_SET_CCOMPARE(n,v) _XTHAL_SET_CCOMPARE(n,v) -#else -# define XTHAL_GET_INTENABLE() 0 -# define XTHAL_SET_INTENABLE(v) do {/*nothing*/} while(0) -# define XTHAL_GET_INTERRUPT() 0 -# define XTHAL_SET_INTSET(v) do {/*nothing*/} while(0) -# define XTHAL_SET_INTCLEAR(v) do {/*nothing*/} while(0) -# define XTHAL_GET_CCOUNT() 0 -# define XTHAL_SET_CCOUNT(v) do {/*nothing*/} while(0) -# define XTHAL_GET_CCOMPARE(n) 0 -# define XTHAL_SET_CCOMPARE(n,v) do {/*nothing*/} while(0) -#endif - -/* New functions added to accomodate XEA3 and allow deprecation of older - functions. For this release they just map to the older ones. */ - -/* Enables the specified interrupt. 
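 *
 * Editor's sketch (not part of the original header): unlike the mask-based
 * macros above, these wrappers take an interrupt number, e.g. for a
 * configured software interrupt n:
 *
 *	xthal_interrupt_enable(n);	// same effect as xthal_int_enable(1 << n)
 *	xthal_interrupt_trigger(n);	// sets the bit via INTSET
 *	xthal_interrupt_clear(n);	// clears it via INTCLEAR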
*/ -static inline void xthal_interrupt_enable(unsigned intnum) -{ - xthal_int_enable(1 << intnum); -} - -/* Disables the specified interrupt. */ -static inline void xthal_interrupt_disable(unsigned intnum) -{ - xthal_int_disable(1 << intnum); -} - -/* Triggers the specified interrupt. */ -static inline void xthal_interrupt_trigger(unsigned intnum) -{ - xthal_set_intset(1 << intnum); -} - -/* Clears the specified interrupt. */ -static inline void xthal_interrupt_clear(unsigned intnum) -{ - xthal_set_intclear(1 << intnum); -} - - -/*************************** MISC ***************************/ - -/* - * Macro or inline versions of: - * void xthal_clear_regcached_code( void ); - * unsigned xthal_get_prid( void ); - * unsigned xthal_compare_and_set( int *addr, int testval, int setval ); - */ - -#if XCHAL_HAVE_LOOPS -# define XTHAL_CLEAR_REGCACHED_CODE() \ - __asm__ __volatile__("wsr.lcount %0" :: "a"(0) : "memory") -#else -# define XTHAL_CLEAR_REGCACHED_CODE() do {/*nothing*/} while(0) -#endif - -#if XCHAL_HAVE_PRID -# define XTHAL_GET_PRID() ({ int __prid; \ - __asm__("rsr.prid %0" : "=a"(__prid)); \ - __prid; }) -#else -# define XTHAL_GET_PRID() 0 -#endif - - -static inline unsigned XTHAL_COMPARE_AND_SET( int *addr, int testval, int setval ) -{ - int result; - -#if XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION_MAJOR >= 2200 - __asm__ __volatile__ ( - " wsr.scompare1 %2 \n" - " s32c1i %0, %3, 0 \n" - : "=a"(result) : "0" (setval), "a" (testval), "a" (addr) - : "memory"); -#elif XCHAL_HAVE_INTERRUPTS - int tmp; - __asm__ __volatile__ ( - " rsil %4, 15 \n" // %4 == saved ps - " l32i %0, %3, 0 \n" // %0 == value to test, return val - " bne %2, %0, 9f \n" // test - " s32i %1, %3, 0 \n" // write the new value - "9: wsr.ps %4 ; rsync \n" // restore the PS - : "=a"(result) - : "0" (setval), "a" (testval), "a" (addr), "a" (tmp) - : "memory"); -#else - __asm__ __volatile__ ( - " l32i %0, %3, 0 \n" // %0 == value to test, return val - " bne %2, %0, 9f \n" // test - " s32i %1, %3, 0 \n" // write the new value - "9: \n" - : "=a"(result) : "0" (setval), "a" (testval), "a" (addr) - : "memory"); -#endif - return result; -} - -#if XCHAL_HAVE_EXTERN_REGS - -static inline unsigned XTHAL_RER (unsigned int reg) -{ - unsigned result; - - __asm__ __volatile__ ( - " rer %0, %1" - : "=a" (result) : "a" (reg) : "memory"); - - return result; -} - -static inline void XTHAL_WER (unsigned reg, unsigned value) -{ - __asm__ __volatile__ ( - " wer %0, %1" - : : "a" (value), "a" (reg) : "memory"); -} - -#endif /* XCHAL_HAVE_EXTERN_REGS */ - -/* - * Sets a single entry at 'index' within the MPU - * - * The caller must ensure that the resulting MPU map is ordered. - */ -static inline void xthal_mpu_set_entry (xthal_MPU_entry entry) -{ -#if XCHAL_HAVE_MPU - __asm__ __volatile__("j 1f\n\t.align 8\n\t1: memw\n\twptlb %0, %1\n\t" : : "a" (entry.at), "a"(entry.as)); -#endif -} - -/* Same as xthal_mpu_set_entry except that this function must not be used to change the MPU entry - * for the currently executing instruction ... use xthal_mpu_set_entry instead. 
*/ -static inline void xthal_mpu_set_entry_ (xthal_MPU_entry entry) -{ -#if XCHAL_HAVE_MPU - __asm__ __volatile__("wptlb %0, %1\n\t" : : "a" (entry.at), "a"(entry.as)); -#endif -} - - - -#endif /* C code */ - -#endif /*XTENSA_CACHE_H*/ - diff --git a/src/arch/xtensa/include/xtensa/coreasm.h b/src/arch/xtensa/include/xtensa/coreasm.h deleted file mode 100644 index 4113b7a0b041..000000000000 --- a/src/arch/xtensa/include/xtensa/coreasm.h +++ /dev/null @@ -1,1201 +0,0 @@ -/* - * xtensa/coreasm.h -- assembler-specific definitions that depend on - * CORE configuration. - * - * Source for configuration-independent binaries (which link in a - * configuration-specific HAL library) must NEVER include this file. - * It is perfectly normal, however, for the HAL itself to include this file. - * - * This file must NOT include xtensa/config/system.h. Any assembler - * header file that depends on system information should likely go - * in a new systemasm.h (or sysasm.h) header file. - * - * NOTE: macro beqi32 is NOT configuration-dependent, and is placed - * here until we have a proper configuration-independent header file. - */ - -/* - * Copyright (c) 2000-2018 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTENSA_COREASM_H -#define XTENSA_COREASM_H - -/* - * Tell header files this is assembly source, so they can avoid non-assembler - * definitions (eg. C types etc): - */ -#ifndef _ASMLANGUAGE /* conditionalize to avoid cpp warnings (3rd parties might use same macro) */ -#define _ASMLANGUAGE -#endif - -#include -#include -#include -#include - -/* - * Assembly-language specific definitions (assembly macros, etc.). - */ - -/*---------------------------------------------------------------------- - * find_ms_setbit - * - * This macro finds the most significant bit that is set in - * and return its index + in , or - 1 if is zero. - * The index counts starting at zero for the lsbit, so the return - * value ranges from -1 (no bit set) to +31 (msbit set). - * - * Parameters: - * destination address register (any register) - * source address register - * temporary address register (must be different than ) - * constant value added to result (usually 0 or 1) - * On entry: - * = undefined if different than - * = value whose most significant set bit is to be found - * = undefined - * no other registers are used by this macro. - * On exit: - * = + index of msbit set in original , - * = - 1 if original was zero. 
- * clobbered (if not ) - * clobbered (if not ) - * Example: - * find_ms_setbit a0, a4, a0, 0 -- return in a0 index of msbit set in a4 - */ - - .macro find_ms_setbit ad, as, at, base -#if XCHAL_HAVE_NSA - movi \at, 31+\base - nsau \as, \as // get index of \as, numbered from msbit (32 if absent) - sub \ad, \at, \as // get numbering from lsbit (0..31, -1 if absent) -#else /* XCHAL_HAVE_NSA */ - movi \at, \base // start with result of 0 (point to lsbit of 32) - - beqz \as, 2f // special case for zero argument: return -1 - bltui \as, 0x10000, 1f // is it one of the 16 lsbits? (if so, check lower 16 bits) - addi \at, \at, 16 // no, increment result to upper 16 bits (of 32) - //srli \as, \as, 16 // check upper half (shift right 16 bits) - extui \as, \as, 16, 16 // check upper half (shift right 16 bits) -1: - bltui \as, 0x100, 1f // is it one of the 8 lsbits? (if so, check lower 8 bits) - addi \at, \at, 8 // no, increment result to upper 8 bits (of 16) - srli \as, \as, 8 // shift right to check upper 8 bits -1: - bltui \as, 0x10, 1f // is it one of the 4 lsbits? (if so, check lower 4 bits) - addi \at, \at, 4 // no, increment result to upper 4 bits (of 8) - srli \as, \as, 4 // shift right 4 bits to check upper half -1: - bltui \as, 0x4, 1f // is it one of the 2 lsbits? (if so, check lower 2 bits) - addi \at, \at, 2 // no, increment result to upper 2 bits (of 4) - srli \as, \as, 2 // shift right 2 bits to check upper half -1: - bltui \as, 0x2, 1f // is it the lsbit? - addi \at, \at, 2 // no, increment result to upper bit (of 2) -2: - addi \at, \at, -1 // (from just above: add 1; from beqz: return -1) - //srli \as, \as, 1 -1: // done! \at contains index of msbit set (or -1 if none set) - .if 0x\ad - 0x\at // destination different than \at ? (works because regs are a0-a15) - mov \ad, \at // then move result to \ad - .endif -#endif /* XCHAL_HAVE_NSA */ - .endm // find_ms_setbit - -/*---------------------------------------------------------------------- - * find_ls_setbit - * - * This macro finds the least significant bit that is set in , - * and return its index in . - * Usage is the same as for the find_ms_setbit macro. - * Example: - * find_ls_setbit a0, a4, a0, 0 -- return in a0 index of lsbit set in a4 - */ - - .macro find_ls_setbit ad, as, at, base - neg \at, \as // keep only the least-significant bit that is set... - and \as, \at, \as // ... in \as - find_ms_setbit \ad, \as, \at, \base - .endm // find_ls_setbit - -/*---------------------------------------------------------------------- - * find_ls_one - * - * Same as find_ls_setbit with base zero. - * Source (as) and destination (ad) registers must be different. - * Provided for backward compatibility. - */ - - .macro find_ls_one ad, as - find_ls_setbit \ad, \as, \ad, 0 - .endm // find_ls_one - -/*---------------------------------------------------------------------- - * floop, floopnez, floopgtz, floopend - * - * These macros are used for fast inner loops that - * work whether or not the Loops options is configured. - * If the Loops option is configured, they simply use - * the zero-overhead LOOP instructions; otherwise - * they use explicit decrement and branch instructions. - * - * They are used in pairs, with floop, floopnez or floopgtz - * at the beginning of the loop, and floopend at the end. - * - * Each pair of loop macro calls must be given the loop count - * address register and a unique label for that loop. 
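A rough C restatement (not from the deleted file) of what the find_ms_setbit macro above computes: the index of the most significant set bit plus base, or base - 1 for a zero argument. The helper name is illustrative and __builtin_clz assumes a GCC-compatible compiler.

    static inline int find_ms_setbit_c(unsigned value, int base)
    {
        /* Bit 0 is the lsbit; returns base - 1 when no bit is set. */
        return value ? base + 31 - (int)__builtin_clz(value) : base - 1;
    }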
- * - * Example: - * - * movi a3, 16 // loop 16 times - * floop a3, myloop1 - * : - * bnez a7, end1 // exit loop if a7 != 0 - * : - * floopend a3, myloop1 - * end1: - * - * Like the LOOP instructions, these macros cannot be - * nested, must include at least one instruction, - * cannot call functions inside the loop, etc. - * The loop can be exited by jumping to the instruction - * following floopend (or elsewhere outside the loop), - * or continued by jumping to a NOP instruction placed - * immediately before floopend. - * - * Unlike LOOP instructions, the register passed to floop* - * cannot be used inside the loop, because it is used as - * the loop counter if the Loops option is not configured. - * And its value is undefined after exiting the loop. - * And because the loop counter register is active inside - * the loop, you can't easily use this construct to loop - * across a register file using ROTW as you might with LOOP - * instructions, unless you copy the loop register along. - */ - - /* Named label version of the macros: */ - - .macro floop ar, endlabel - floop_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel - .endm - - .macro floopnez ar, endlabel - floopnez_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel - .endm - - .macro floopgtz ar, endlabel - floopgtz_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel - .endm - - .macro floopend ar, endlabel - floopend_ \ar, .Lfloopstart_\endlabel, .Lfloopend_\endlabel - .endm - - /* Numbered local label version of the macros: */ -#if 0 /*UNTESTED*/ - .macro floop89 ar - floop_ \ar, 8, 9f - .endm - - .macro floopnez89 ar - floopnez_ \ar, 8, 9f - .endm - - .macro floopgtz89 ar - floopgtz_ \ar, 8, 9f - .endm - - .macro floopend89 ar - floopend_ \ar, 8b, 9 - .endm -#endif /*0*/ - - /* Underlying version of the macros: */ - - .macro floop_ ar, startlabel, endlabelref - .ifdef _infloop_ - .if _infloop_ - .err // Error: floop cannot be nested - .endif - .endif - .set _infloop_, 1 -#if XCHAL_HAVE_LOOPS - loop \ar, \endlabelref -#else /* XCHAL_HAVE_LOOPS */ -\startlabel: - addi \ar, \ar, -1 -#endif /* XCHAL_HAVE_LOOPS */ - .endm // floop_ - - .macro floopnez_ ar, startlabel, endlabelref - .ifdef _infloop_ - .if _infloop_ - .err // Error: floopnez cannot be nested - .endif - .endif - .set _infloop_, 1 -#if XCHAL_HAVE_LOOPS - loopnez \ar, \endlabelref -#else /* XCHAL_HAVE_LOOPS */ - beqz \ar, \endlabelref -\startlabel: - addi \ar, \ar, -1 -#endif /* XCHAL_HAVE_LOOPS */ - .endm // floopnez_ - - .macro floopgtz_ ar, startlabel, endlabelref - .ifdef _infloop_ - .if _infloop_ - .err // Error: floopgtz cannot be nested - .endif - .endif - .set _infloop_, 1 -#if XCHAL_HAVE_LOOPS - loopgtz \ar, \endlabelref -#else /* XCHAL_HAVE_LOOPS */ - bltz \ar, \endlabelref - beqz \ar, \endlabelref -\startlabel: - addi \ar, \ar, -1 -#endif /* XCHAL_HAVE_LOOPS */ - .endm // floopgtz_ - - - .macro floopend_ ar, startlabelref, endlabel - .ifndef _infloop_ - .err // Error: floopend without matching floopXXX - .endif - .ifeq _infloop_ - .err // Error: floopend without matching floopXXX - .endif - .set _infloop_, 0 -#if ! XCHAL_HAVE_LOOPS - bnez \ar, \startlabelref -#endif /* XCHAL_HAVE_LOOPS */ -\endlabel: - .endm // floopend_ - -/*---------------------------------------------------------------------- - * crsil -- conditional RSIL (read/set interrupt level) - * - * Executes the RSIL instruction if it exists, else just reads PS. - * The RSIL instruction does not exist in XEA2 if the interrupt - * option is not selected. 
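A sketch (not from the deleted file) of the save/raise/restore pattern that crsil supports at the assembly level, shown as C with inline assembly and assuming an XEA2 core with the interrupt option configured; the helper names are illustrative.

    static inline unsigned lock_int_level(void)
    {
        unsigned old_ps;
        /* Raise PS.INTLEVEL to 15 and return the previous PS value. */
        __asm__ __volatile__("rsil %0, 15" : "=a"(old_ps) : : "memory");
        return old_ps;
    }

    static inline void unlock_int_level(unsigned old_ps)
    {
        /* Restore the saved PS and synchronize. */
        __asm__ __volatile__("wsr.ps %0 ; rsync" : : "a"(old_ps) : "memory");
    }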
- */ - - .macro crsil ar, newlevel -#if XCHAL_HAVE_INTERRUPTS - rsil \ar, \newlevel -#else - rsr.ps \ar -#endif - .endm // crsil - -/*---------------------------------------------------------------------- - * safe_movi_a0 -- move constant into a0 when L32R is not safe - * - * This macro is typically used by interrupt/exception handlers. - * Loads a 32-bit constant in a0, without using any other register, - * and without corrupting the LITBASE register, even when the - * value of the LITBASE register is unknown (eg. when application - * code and interrupt/exception handling code are built independently, - * and thus with independent values of the LITBASE register; - * debug monitors are one example of this). - * - * Worst-case size of resulting code: 17 bytes. - */ - - .macro safe_movi_a0 constant -#if XCHAL_HAVE_ABSOLUTE_LITERALS - /* Contort a PC-relative literal load even though we may be in litbase-relative mode: */ - j 1f - .begin no-transform // ensure what follows is assembled exactly as-is - .align 4 // ensure constant and call0 target ... - .byte 0 // ... are 4-byte aligned (call0 instruction is 3 bytes long) -1: call0 2f // read PC (that follows call0) in a0 - .long \constant // 32-bit constant to load into a0 -2: - .end no-transform - l32i a0, a0, 0 // load constant -#else - movi a0, \constant // no LITBASE, can assume PC-relative L32R -#endif - .endm - - - - -/*---------------------------------------------------------------------- - * window_spill{4,8,12} - * - * These macros spill callers' register windows to the stack. - * They work for both privileged and non-privileged tasks. - * Must be called from a windowed ABI context, eg. within - * a windowed ABI function (ie. valid stack frame, window - * exceptions enabled, not in exception mode, etc). - * - * This macro requires a single invocation of the window_spill_common - * macro in the same assembly unit and section. - * - * Note that using window_spill{4,8,12} macros is more efficient - * than calling a function implemented using window_spill_function, - * because the latter needs extra code to figure out the size of - * the call to the spilling function. - * - * Example usage: - * - * .text - * .align 4 - * .global some_function - * .type some_function,@function - * some_function: - * entry a1, 16 - * : - * : - * - * window_spill4 // Spill windows of some_function's callers; preserves a0..a3 only; - * // to use window_spill{8,12} in this example function we'd have - * // to increase space allocated by the entry instruction, because - * // 16 bytes only allows call4; 32 or 48 bytes (+locals) are needed - * // for call8/window_spill8 or call12/window_spill12 respectively. 
- * - * : - * - * retw - * - * window_spill_common // instantiates code used by window_spill4 - * - * - * On entry: - * none (if window_spill4) - * stack frame has enough space allocated for call8 (if window_spill8) - * stack frame has enough space allocated for call12 (if window_spill12) - * On exit: - * a4..a15 clobbered (if window_spill4) - * a8..a15 clobbered (if window_spill8) - * a12..a15 clobbered (if window_spill12) - * no caller windows are in live registers - */ - - .macro window_spill4 -#if XCHAL_HAVE_WINDOWED -# if XCHAL_NUM_AREGS == 16 - movi a15, 0 // for 16-register files, no need to call to reach the end -# elif XCHAL_NUM_AREGS == 32 - call4 .L__wdwspill_assist28 // call deep enough to clear out any live callers -# elif XCHAL_NUM_AREGS == 64 - call4 .L__wdwspill_assist60 // call deep enough to clear out any live callers -# endif -#endif - .endm // window_spill4 - - .macro window_spill8 -#if XCHAL_HAVE_WINDOWED -# if XCHAL_NUM_AREGS == 16 - movi a15, 0 // for 16-register files, no need to call to reach the end -# elif XCHAL_NUM_AREGS == 32 - call8 .L__wdwspill_assist24 // call deep enough to clear out any live callers -# elif XCHAL_NUM_AREGS == 64 - call8 .L__wdwspill_assist56 // call deep enough to clear out any live callers -# endif -#endif - .endm // window_spill8 - - .macro window_spill12 -#if XCHAL_HAVE_WINDOWED -# if XCHAL_NUM_AREGS == 16 - movi a15, 0 // for 16-register files, no need to call to reach the end -# elif XCHAL_NUM_AREGS == 32 - call12 .L__wdwspill_assist20 // call deep enough to clear out any live callers -# elif XCHAL_NUM_AREGS == 64 - call12 .L__wdwspill_assist52 // call deep enough to clear out any live callers -# endif -#endif - .endm // window_spill12 - - -/*---------------------------------------------------------------------- - * window_spill_function - * - * This macro outputs a function that will spill its caller's callers' - * register windows to the stack. Eg. it could be used to implement - * a version of xthal_window_spill() that works in non-privileged tasks. - * This works for both privileged and non-privileged tasks. 
- * - * Typical usage: - * - * .text - * .align 4 - * .global my_spill_function - * .type my_spill_function,@function - * my_spill_function: - * window_spill_function - * - * On entry to resulting function: - * none - * On exit from resulting function: - * none (no caller windows are in live registers) - */ - - .macro window_spill_function -#if XCHAL_HAVE_WINDOWED -# if XCHAL_NUM_AREGS == 32 - entry sp, 48 - bbci.l a0, 31, 1f // branch if called with call4 - bbsi.l a0, 30, 2f // branch if called with call12 - call8 .L__wdwspill_assist16 // called with call8, only need another 8 - retw -1: - call12 .L__wdwspill_assist16 // called with call4, only need another 12 - retw -2: - call4 .L__wdwspill_assist16 // called with call12, only need another 4 - retw -# elif XCHAL_NUM_AREGS == 64 - entry sp, 48 - bbci.l a0, 31, 1f // branch if called with call4 - bbsi.l a0, 30, 2f // branch if called with call12 - call4 .L__wdwspill_assist52 // called with call8, only need a call4 - retw -1: - call8 .L__wdwspill_assist52 // called with call4, only need a call8 - retw -2: - call12 .L__wdwspill_assist40 // called with call12, can skip a call12 - retw -# elif XCHAL_NUM_AREGS == 16 - entry sp, 16 - bbci.l a0, 31, 1f // branch if called with call4 - bbsi.l a0, 30, 2f // branch if called with call12 - movi a7, 0 // called with call8 - retw -1: - movi a11, 0 // called with call4 -2: - retw // if called with call12, everything already spilled - -// movi a15, 0 // trick to spill all but the direct caller -// j 1f -// // The entry instruction is magical in the assembler (gets auto-aligned) -// // so we have to jump to it to avoid falling through the padding. -// // We need entry/retw to know where to return. -//1: entry sp, 16 -// retw -# else -# error "unrecognized address register file size" -# endif - -#endif /* XCHAL_HAVE_WINDOWED */ - window_spill_common - .endm // window_spill_function - -/*---------------------------------------------------------------------- - * window_spill_common - * - * Common code used by any number of invocations of the window_spill## - * and window_spill_function macros. - * - * Must be instantiated exactly once within a given assembly unit, - * within call/j range of and same section as window_spill## - * macro invocations for that assembly unit. - * (Is automatically instantiated by the window_spill_function macro.) - */ - - .macro window_spill_common -#if XCHAL_HAVE_WINDOWED && (XCHAL_NUM_AREGS == 32 || XCHAL_NUM_AREGS == 64) - .ifndef .L__wdwspill_defined -# if XCHAL_NUM_AREGS >= 64 -.L__wdwspill_assist60: - entry sp, 32 - call8 .L__wdwspill_assist52 - retw -.L__wdwspill_assist56: - entry sp, 16 - call4 .L__wdwspill_assist52 - retw -.L__wdwspill_assist52: - entry sp, 48 - call12 .L__wdwspill_assist40 - retw -.L__wdwspill_assist40: - entry sp, 48 - call12 .L__wdwspill_assist28 - retw -# endif -.L__wdwspill_assist28: - entry sp, 48 - call12 .L__wdwspill_assist16 - retw -.L__wdwspill_assist24: - entry sp, 32 - call8 .L__wdwspill_assist16 - retw -.L__wdwspill_assist20: - entry sp, 16 - call4 .L__wdwspill_assist16 - retw -.L__wdwspill_assist16: - entry sp, 16 - movi a15, 0 - retw - .set .L__wdwspill_defined, 1 - .endif -#endif /* XCHAL_HAVE_WINDOWED with 32 or 64 aregs */ - .endm // window_spill_common - -/*---------------------------------------------------------------------- - * beqi32 - * - * macro implements version of beqi for arbitrary 32-bit immediate value - * - * beqi32 ax, ay, imm32, label - * - * Compares value in register ax with imm32 value and jumps to label if - * equal. 
Clobbers register ay if needed - * - */ - .macro beqi32 ax, ay, imm, label - .ifeq ((\imm-1) & ~7) // 1..8 ? - beqi \ax, \imm, \label - .else - .ifeq (\imm+1) // -1 ? - beqi \ax, \imm, \label - .else - .ifeq (\imm) // 0 ? - beqz \ax, \label - .else - // We could also handle immediates 10,12,16,32,64,128,256 - // but it would be a long macro... - movi \ay, \imm - beq \ax, \ay, \label - .endif - .endif - .endif - .endm // beqi32 - -/*---------------------------------------------------------------------- - * isync_retw_nop - * - * This macro must be invoked immediately after ISYNC if ISYNC - * would otherwise be immediately followed by RETW (or other instruction - * modifying WindowBase or WindowStart), in a context where - * kernel vector mode may be selected, and level-one interrupts - * and window overflows may be enabled, on an XEA1 configuration. - * - * On hardware with erratum "XEA1KWIN" (see for details), - * XEA1 code must have at least one instruction between ISYNC and RETW if - * run in kernel vector mode with interrupts and window overflows enabled. - */ - .macro isync_retw_nop -#if XCHAL_MAYHAVE_ERRATUM_XEA1KWIN - nop -#endif - .endm - -/*---------------------------------------------------------------------- - * isync_return_nop - * - * This macro should be used instead of isync_retw_nop in code that is - * intended to run on both the windowed and call0 ABIs - */ - .macro isync_return_nop -#ifdef __XTENSA_WINDOWED_ABI__ - isync_retw_nop -#endif - .endm - -/*---------------------------------------------------------------------- - * isync_erratum453 - * - * This macro must be invoked at certain points in the code, - * such as in exception and interrupt vectors in particular, - * to work around erratum 453. - */ - .macro isync_erratum453 -#if XCHAL_ERRATUM_453 - isync -#endif - .endm - - -/*---------------------------------------------------------------------- - * readsr - * - * wrapper for 'rsr' that constructs register names that involve levels - * e.g. EPCn etc. Use like so: - * readsr epc XCHAL_DEBUGLEVEL a2 - */ - .macro readsr reg suf ar - rsr.\reg\suf \ar - .endm - -/*---------------------------------------------------------------------- - * writesr - * - * wrapper for 'wsr' that constructs register names that involve levels - * e.g. EPCn etc. Use like so: - * writesr epc XCHAL_DEBUGLEVEL a2 - */ - .macro writesr reg suf ar - wsr.\reg\suf \ar - .endm - -/*---------------------------------------------------------------------- - * xchgsr - * - * wrapper for 'xsr' that constructs register names that involve levels - * e.g. EPCn etc. Use like so: - * xchgsr epc XCHAL_DEBUGLEVEL a2 - */ - .macro xchgsr reg suf ar - xsr.\reg\suf \ar - .endm - -/*---------------------------------------------------------------------- - * INDEX_SR - * - * indexing wrapper for rsr/wsr/xsr that constructs register names from - * the provided base name and the current index. 
Use like so: - * .set _idx, 0 - * INDEX_SR rsr.ccompare a2 - * - * this yields: rsr.ccompare0 a2 - */ - .macro INDEX_SR instr ar -.ifeq (_idx) - &instr&0 \ar -.endif -.ifeq (_idx-1) - &instr&1 \ar -.endif -.ifeq (_idx-2) - &instr&2 \ar -.endif -.ifeq (_idx-3) - &instr&3 \ar -.endif -.ifeq (_idx-4) - &instr&4 \ar -.endif -.ifeq (_idx-5) - &instr&5 \ar -.endif -.ifeq (_idx-6) - &instr&6 \ar -.endif -.ifeq (_idx-7) - &instr&7 \ar -.endif - .endm - - -/*---------------------------------------------------------------------- - * abs - * - * implements abs on machines that do not have it configured - */ - -#if !XCHAL_HAVE_ABS - .macro abs arr, ars - .ifc \arr, \ars - //src equal dest is less efficient - bgez \arr, 1f - neg \arr, \arr -1: - .else - neg \arr, \ars - movgez \arr, \ars, \ars - .endif - .endm -#endif /* !XCHAL_HAVE_ABS */ - - -/*---------------------------------------------------------------------- - * addx2 - * - * implements addx2 on machines that do not have it configured - * - */ - -#if !XCHAL_HAVE_ADDX - .macro addx2 arr, ars, art - .ifc \arr, \art - .ifc \arr, \ars - // addx2 a, a, a (not common) - .err - .else - add \arr, \ars, \art - add \arr, \ars, \art - .endif - .else - //addx2 a, b, c - //addx2 a, a, b - //addx2 a, b, b - slli \arr, \ars, 1 - add \arr, \arr, \art - .endif - .endm -#endif /* !XCHAL_HAVE_ADDX */ - -/*---------------------------------------------------------------------- - * addx4 - * - * implements addx4 on machines that do not have it configured - * - */ - -#if !XCHAL_HAVE_ADDX - .macro addx4 arr, ars, art - .ifc \arr, \art - .ifc \arr, \ars - // addx4 a, a, a (not common) - .err - .else - //# addx4 a, b, a - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - .endif - .else - //addx4 a, b, c - //addx4 a, a, b - //addx4 a, b, b - slli \arr, \ars, 2 - add \arr, \arr, \art - .endif - .endm -#endif /* !XCHAL_HAVE_ADDX */ - -/*---------------------------------------------------------------------- - * addx8 - * - * implements addx8 on machines that do not have it configured - * - */ - -#if !XCHAL_HAVE_ADDX - .macro addx8 arr, ars, art - .ifc \arr, \art - .ifc \arr, \ars - //addx8 a, a, a (not common) - .err - .else - //addx8 a, b, a - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - add \arr, \ars, \art - .endif - .else - //addx8 a, b, c - //addx8 a, a, b - //addx8 a, b, b - slli \arr, \ars, 3 - add \arr, \arr, \art - .endif - .endm -#endif /* !XCHAL_HAVE_ADDX */ - - -/*---------------------------------------------------------------------- - * rfe_rfue - * - * Maps to RFUE on XEA1, and RFE on XEA2. No mapping on XEAX. - */ - -#if XCHAL_HAVE_XEA1 - .macro rfe_rfue - rfue - .endm -#elif XCHAL_HAVE_XEA2 - .macro rfe_rfue - rfe - .endm -#endif - - -/*---------------------------------------------------------------------- - * abi_entry - * - * Generate proper function entry sequence for the current ABI - * (windowed or call0). Takes care of allocating stack space (up to 1kB) - * and saving the return PC, if necessary. The corresponding abi_return - * macro does the corresponding stack deallocation and restoring return PC. - * - * Parameters are: - * - * locsize Number of bytes to allocate on the stack - * for local variables (and for args to pass to - * callees, if any calls are made). Defaults to zero. - * The macro rounds this up to a multiple of 16. - * NOTE: large values are allowed (e.g. up to 1 GB). 
- * - * callsize Maximum call size made by this function. - * Leave zero (default) for leaf functions, i.e. if - * this function makes no calls to other functions. - * Otherwise must be set to 4, 8, or 12 according - * to whether the "largest" call made is a call[x]4, - * call[x]8, or call[x]12 (for call0 ABI, it makes - * no difference whether this is set to 4, 8 or 12, - * but it must be set to one of these values). - * - * NOTE: It is up to the caller to align the entry point, declare the - * function symbol, make it global, etc. - * - * NOTE: This macro relies on assembler relaxation for large values - * of locsize. It might not work with the no-transform directive. - * NOTE: For the call0 ABI, this macro ensures SP is allocated or - * de-allocated cleanly, i.e. without temporarily allocating too much - * (or allocating negatively!) due to addi relaxation. - * - * NOTE: Generating the proper sequence and register allocation for - * making calls in an ABI independent manner is a separate topic not - * covered by this macro. - * - * NOTE: To access arguments, you can't use a fixed offset from SP. - * The offset depends on the ABI, whether the function is leaf, etc. - * The simplest method is probably to use the .locsz symbol, which - * is set by this macro to the actual number of bytes allocated on - * the stack, in other words, to the offset from SP to the arguments. - * E.g. for a function whose arguments are all 32-bit integers, you - * can get the 7th and 8th arguments (1st and 2nd args stored on stack) - * using: - * l32i a2, sp, .locsz - * l32i a3, sp, .locsz+4 - * (this example works as long as locsize is under L32I's offset limit - * of 1020 minus up to 48 bytes of ABI-specific stack usage; - * otherwise you might first need to do "addi a?, sp, .locsz" - * or similar sequence). - * - * NOTE: For call0 ABI, this macro (and abi_return) may clobber a9 - * (a caller-saved register). - * - * Examples: - * abi_entry - * abi_entry 5 - * abi_entry 22, 8 - * abi_entry 0, 4 - */ - - /* - * Compute .locsz and .callsz without emitting any instructions. - * Used by both abi_entry and abi_return. - * Assumes locsize >= 0. - */ - .macro abi_entry_size locsize=0, callsize=0 -#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__ -# if XCHAL_HAVE_XEA3 - .set .callsz, 32 /* call8 only */ -# else - .ifeq \callsize - .set .callsz, 16 - .else - .ifeq \callsize-4 - .set .callsz, 16 - .else - .ifeq \callsize-8 - .set .callsz, 32 - .else - .ifeq \callsize-12 - .set .callsz, 48 - .else - .error "abi_entry: invalid call size \callsize" - .endif - .endif - .endif - .endif -# endif - .set .locsz, .callsz + ((\locsize + 15) & -16) -#else - .set .callsz, \callsize - .if .callsz /* if calls, need space for return PC */ - .set .locsz, (\locsize + 4 + 15) & -16 - .else - .set .locsz, (\locsize + 15) & -16 - .endif -#endif - .endm - - .macro abi_entry locsize=0, callsize=0 - .iflt \locsize - .error "abi_entry: invalid negative size of locals (\locsize)" - .endif - abi_entry_size \locsize, \callsize -#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__ -# define ABI_ENTRY_MINSIZE 3 /* size of abi_entry (no arguments) instructions in bytes */ - .ifgt .locsz - 32760 /* .locsz > 32760 (ENTRY's max range)? 
*/ - /* Funky computation to try to have assembler use addmi efficiently if possible: */ - entry sp, 0x7F00 + (.locsz & 0xF0) - addi a12, sp, - ((.locsz & -0x100) - 0x7F00) - movsp sp, a12 - .else - entry sp, .locsz - .endif -#else -# define ABI_ENTRY_MINSIZE 0 /* size of abi_entry (no arguments) instructions in bytes */ - .if .locsz - .ifle .locsz - 128 /* if locsz <= 128 */ - addi sp, sp, -.locsz - .if .callsz - s32i a0, sp, .locsz - 4 - .endif - .elseif .callsz /* locsz > 128, with calls: */ - movi a9, .locsz - 16 /* note: a9 is caller-saved */ - addi sp, sp, -16 - s32i a0, sp, 12 - sub sp, sp, a9 - .else /* locsz > 128, no calls: */ - movi a9, .locsz - sub sp, sp, a9 - .endif /* end */ - .endif -#endif - .endm - - - -/*---------------------------------------------------------------------- - * abi_return - * - * Generate proper function exit sequence for the current ABI - * (windowed or call0). Takes care of freeing stack space and - * restoring the return PC, if necessary. - * NOTE: This macro MUST be invoked following a corresponding - * abi_entry macro invocation. For call0 ABI in particular, - * all stack and PC restoration are done according to the last - * abi_entry macro invoked before this macro in the assembly file. - * - * Normally this macro takes no arguments. However to allow - * for placing abi_return *before* abi_entry (as must be done - * for some highly optimized assembly), it optionally takes - * exactly the same arguments as abi_entry. - */ - - .macro abi_return locsize=-1, callsize=0 - .ifge \locsize - abi_entry_size \locsize, \callsize - .endif -#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__ - retw -#else - .if .locsz - .iflt .locsz - 128 /* if locsz < 128 */ - .if .callsz - l32i a0, sp, .locsz - 4 - .endif - addi sp, sp, .locsz - .elseif .callsz /* locsz >= 128, with calls: */ - addi a9, sp, .locsz - 16 - l32i a0, a9, 12 - addi sp, a9, 16 - .else /* locsz >= 128, no calls: */ - movi a9, .locsz - add sp, sp, a9 - .endif /* end */ - .endif - ret -#endif - .endm - - -/* - * HW erratum fixes. - */ - - .macro hw_erratum_487_fix -#if defined XSHAL_ERRATUM_487_FIX - isync -#endif - .endm - -#if XCHAL_HAVE_XEA3 && XCHAL_HW_MIN_VERSION == XTENSA_HWVERSION_RH_2016_0 - .macro halt imm=0 -.Lhalt\@: j .Lhalt\@ - .endm - .macro halt.n - halt 0 - .endm -#endif - - -/* - * These macros are internal, subject to change, and should not be used in - * any new code. - */ - -#define _GBL(x) .global x -#define _TYP(x) .type x,@function -#define _ALN(x) .align x -#define _SIZ(x) .size x, . - x -#define _MKEND(x) .purgem endfunc ; .macro endfunc ; _SIZ(x) ; .purgem endfunc ; .macro endfunc ; .endm ; .endm -#define _SYMT(x) _GBL(x); _MKEND(x); _TYP(x); _ALN(4); x: -#define _SYM2(x) _GBL(x); _TYP(x); x: -#define _SYM(x) _GBL(x); _MKEND(x); _ALN(4); x: -.macro endfunc ; .endm - -/* - * the DECLFUNC() macro provides a mechanism for implementing both the - * standard and _nw interface with a single copy of the code. - * - * For Call0 ABI there is one function definition which is labeled with - * both the xthal_..._nw and xthal_... symbols. - * - * For windowed ABI, two compilations are involved (one with the __NW_FUNCTION__ - * symbol defined) resulting in two separate functions (the _nw one without - * the window adjustments). -*/ - -#if defined(__NW_FUNCTION__) -# define DECLFUNC(x) _SYMT(x ## _nw) -#else -# if defined (__XTENSA_CALL0_ABI__) -# define DECLFUNC(x) _SYMT(x); _SYM2(x ## _nw) -# else -# define DECLFUNC(x) _SYMT(x) -# endif -#endif - - -/* - * Macros to support virtual ops. 
- */ -#include - -#ifndef XT_ADD_A - .macro add.a a,b,c ; add \a, \b, \c ; .endm -#endif - -#ifndef XT_ADDI_A - .macro addi.a a,b,c ; addi \a, \b, \c ; .endm -#endif - -#ifndef XT_ADDMI_A - .macro addmi.a a,b,c ; addmi \a, \b, \c ; .endm -#endif - -#ifndef XT_ADDX2_A - .macro addx2.a a,b,c ; addx2 \a, \b, \c ; .endm -#endif - -#ifndef XT_ADDX4_A - .macro addx4.a a,b,c ; addx4 \a, \b, \c ; .endm -#endif - -#ifndef XT_ADDX8_A - .macro addx8.a a,b,c ; addx8 \a, \b, \c ; .endm -#endif - -#ifndef XT_MOV_A - .macro mov.a a,b ; mov \a, \b ; .endm -#endif - -#ifndef XT_SUB_A - .macro sub.a a,b,c ; sub \a, \b, \c ; .endm -#endif - -/* Places the core-id in the requested AR register.*/ - .macro xt_core_id ar -#if XCHAL_HAVE_PRID -#if PRID_ID_BITS - rsr.prid \ar - extui \ar, \ar, PRID_ID_SHIFT, PRID_ID_BITS -#else - movi \ar, 0 -#endif -#else - movi \ar, 0 -#endif - .endm - -.macro clr_dcache scratch1, scratch2, scratch3 -#if defined(XCHAL_DCACHE_SIZE) && (XCHAL_DCACHE_SIZE > 0) - movi \scratch3, 0 - movi \scratch1, 0 - movi \scratch2, XCHAL_DCACHE_SIZE -1: - sdct \scratch3, \scratch1 - addi \scratch1, \scratch1, XCHAL_DCACHE_LINESIZE * (1 << XCHAL_DCACHE_LINES_PER_TAG_LOG2) - bne \scratch1, \scratch2, 1b - movi \scratch1, 0 -2: - sdcw \scratch3, \scratch1 - addi \scratch1, \scratch1, 4 - bne \scratch1, \scratch2, 2b -#endif -.endm - -.macro clr_icache scratch1, scratch2, scratch3 -#if defined(XCHAL_ICACHE_SIZE) && (XCHAL_ICACHE_SIZE > 0) - movi \scratch3, 0 - movi \scratch1, 0 - movi \scratch2, XCHAL_ICACHE_SIZE -1: - sict \scratch3, \scratch1 - addi \scratch1, \scratch1, XCHAL_ICACHE_LINESIZE - bne \scratch1, \scratch2, 1b - movi \scratch1, 0 -2: - sicw \scratch3, \scratch1 - addi \scratch1, \scratch1, 4 - bne \scratch1, \scratch2, 2b -#endif -.endm - -.macro clr_localmem base_addr, bytes, scratch1, scratch2, scratch3 - movi \scratch1, \base_addr - movi \scratch2, \base_addr + \bytes - movi \scratch3, 0 -1: - s32i \scratch3, \scratch1, 0 - addi \scratch1, \scratch1, 4 - bne \scratch1, \scratch2, 1b -.endm - -.macro clr_all_localmems scratch1, scratch2, scratch3 -#if defined(XCHAL_INSTRAM0_SIZE) && (XCHAL_INSTRAM0_SIZE > 0) - clr_localmem XCHAL_INSTRAM0_VADDR, XCHAL_INSTRAM0_SIZE, \scratch1, \scratch2, \scratch3 -#endif -#if defined(XCHAL_INSTRAM1_SIZE) && (XCHAL_INSTRAM1_SIZE > 0) - clr_localmem XCHAL_INSTRAM1_VADDR, XCHAL_INSTRAM1_SIZE, \scratch1, \scratch2, \scratch3 -#endif -#if defined(XCHAL_DATARAM0_SIZE) && (XCHAL_DATARAM0_SIZE > 0) - clr_localmem XCHAL_DATARAM0_VADDR, XCHAL_DATARAM0_SIZE, \scratch1, \scratch2, \scratch3 -#endif -#if defined(XCHAL_DATARAM1_SIZE) && (XCHAL_DATARAM1_SIZE > 0) - clr_localmem XCHAL_DATARAM1_VADDR, XCHAL_DATARAM1_SIZE, \scratch1, \scratch2, \scratch3 -#endif -#if defined(XCHAL_URAM0_SIZE) && (XCHAL_URAM0_SIZE > 0) - clr_localmem XCHAL_URAM0_VADDR, XCHAL_URAM0_SIZE, \scratch1, \scratch2, \scratch3 -#endif -#if defined(XCHAL_URAM1_SIZE) && (XCHAL_URAM1_SIZE > 0) - clr_localmem XCHAL_URAM1_VADDR, XCHAL_URAM1_SIZE, \scratch1, \scratch2, \scratch3 -#endif -.endm - -#endif /*XTENSA_COREASM_H*/ - diff --git a/src/arch/xtensa/include/xtensa/corebits.h b/src/arch/xtensa/include/xtensa/corebits.h deleted file mode 100644 index c14dabcdcc3c..000000000000 --- a/src/arch/xtensa/include/xtensa/corebits.h +++ /dev/null @@ -1,193 +0,0 @@ -/* - * xtensa/corebits.h - Xtensa Special Register field positions, masks, values. - * - * (In previous releases, these were defined in specreg.h, a generated file. - * This file is not generated, ie. it is processor configuration independent.) 
- */ - -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/include/xtensa/corebits.h#1 $ */ - -/* - * Copyright (c) 2005-2011 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTENSA_COREBITS_H -#define XTENSA_COREBITS_H - -/* EXCCAUSE register fields: */ -#define EXCCAUSE_EXCCAUSE_SHIFT 0 -#define EXCCAUSE_EXCCAUSE_MASK 0x3F -/* EXCCAUSE register values: */ -/* - * General Exception Causes - * (values of EXCCAUSE special register set by general exceptions, - * which vector to the user, kernel, or double-exception vectors). - */ -#define EXCCAUSE_ILLEGAL 0 /* Illegal Instruction */ -#define EXCCAUSE_SYSCALL 1 /* System Call (SYSCALL instruction) */ -#define EXCCAUSE_INSTR_ERROR 2 /* Instruction Fetch Error */ -# define EXCCAUSE_IFETCHERROR 2 /* (backward compatibility macro, deprecated, avoid) */ -#define EXCCAUSE_LOAD_STORE_ERROR 3 /* Load Store Error */ -# define EXCCAUSE_LOADSTOREERROR 3 /* (backward compatibility macro, deprecated, avoid) */ -#define EXCCAUSE_LEVEL1_INTERRUPT 4 /* Level 1 Interrupt */ -# define EXCCAUSE_LEVEL1INTERRUPT 4 /* (backward compatibility macro, deprecated, avoid) */ -#define EXCCAUSE_ALLOCA 5 /* Stack Extension Assist (MOVSP instruction) for alloca */ -#define EXCCAUSE_DIVIDE_BY_ZERO 6 /* Integer Divide by Zero */ -#define EXCCAUSE_SPECULATION 7 /* Use of Failed Speculative Access (not implemented) */ -#define EXCCAUSE_PRIVILEGED 8 /* Privileged Instruction */ -#define EXCCAUSE_UNALIGNED 9 /* Unaligned Load or Store */ -/* Reserved 10..11 */ -#define EXCCAUSE_INSTR_DATA_ERROR 12 /* PIF Data Error on Instruction Fetch (RB-200x and later) */ -#define EXCCAUSE_LOAD_STORE_DATA_ERROR 13 /* PIF Data Error on Load or Store (RB-200x and later) */ -#define EXCCAUSE_INSTR_ADDR_ERROR 14 /* PIF Address Error on Instruction Fetch (RB-200x and later) */ -#define EXCCAUSE_LOAD_STORE_ADDR_ERROR 15 /* PIF Address Error on Load or Store (RB-200x and later) */ -#define EXCCAUSE_ITLB_MISS 16 /* ITLB Miss (no ITLB entry matches, hw refill also missed) */ -#define EXCCAUSE_ITLB_MULTIHIT 17 /* ITLB Multihit (multiple ITLB entries match) */ -#define EXCCAUSE_INSTR_RING 18 /* Ring Privilege Violation on Instruction Fetch */ -/* Reserved 19 */ /* Size Restriction on IFetch (not implemented) */ -#define EXCCAUSE_INSTR_PROHIBITED 20 /* Cache Attribute does not allow Instruction Fetch */ -/* Reserved 21..23 */ -#define EXCCAUSE_DTLB_MISS 24 /* DTLB Miss (no DTLB entry matches, hw refill also missed) */ -#define EXCCAUSE_DTLB_MULTIHIT 25 
/* DTLB Multihit (multiple DTLB entries match) */ -#define EXCCAUSE_LOAD_STORE_RING 26 /* Ring Privilege Violation on Load or Store */ -/* Reserved 27 */ /* Size Restriction on Load/Store (not implemented) */ -#define EXCCAUSE_LOAD_PROHIBITED 28 /* Cache Attribute does not allow Load */ -#define EXCCAUSE_STORE_PROHIBITED 29 /* Cache Attribute does not allow Store */ -/* Reserved 30..31 */ -#define EXCCAUSE_CP_DISABLED(n) (32+(n)) /* Access to Coprocessor 'n' when disabled */ -#define EXCCAUSE_CP0_DISABLED 32 /* Access to Coprocessor 0 when disabled */ -#define EXCCAUSE_CP1_DISABLED 33 /* Access to Coprocessor 1 when disabled */ -#define EXCCAUSE_CP2_DISABLED 34 /* Access to Coprocessor 2 when disabled */ -#define EXCCAUSE_CP3_DISABLED 35 /* Access to Coprocessor 3 when disabled */ -#define EXCCAUSE_CP4_DISABLED 36 /* Access to Coprocessor 4 when disabled */ -#define EXCCAUSE_CP5_DISABLED 37 /* Access to Coprocessor 5 when disabled */ -#define EXCCAUSE_CP6_DISABLED 38 /* Access to Coprocessor 6 when disabled */ -#define EXCCAUSE_CP7_DISABLED 39 /* Access to Coprocessor 7 when disabled */ -/* Reserved 40..63 */ - -/* PS register fields: */ -#define PS_WOE_SHIFT 18 -#define PS_WOE_MASK 0x00040000 -#define PS_WOE PS_WOE_MASK -#define PS_CALLINC_SHIFT 16 -#define PS_CALLINC_MASK 0x00030000 -#define PS_CALLINC(n) (((n)&3)<4) 0 2 or >3 (TBD) - * T1030.0 0 1 (HAL beta) - * T1030.{1,2} 0 3 Equivalent to first release. - * T1030.n (n>=3) 0 >= 3 (TBD) - * T1040.n 1040 n Full CHAL available from T1040.2 - * T1050.n 1050 n . - * 6.0.n 6000 n Xtensa Tools v6 (RA-200x.n) - * 7.0.n 7000 n Xtensa Tools v7 (RB-200x.n) - * 7.1.n 7010 n Xtensa Tools v7.1 (RB-200x.(n+2)) - * 8.0.n 8000 n Xtensa Tools v8 (RC-20xx.n) - * 9.0.n 9000 n Xtensa Tools v9 (RD-201x.n) - * 10.0.n 10000 n Xtensa Tools v10 (RE-201x.n) - * - * - * Note: there is a distinction between the software version with - * which something is compiled (accessible using XTHAL_RELEASE_* macros) - * and the software version with which the HAL library was compiled - * (accessible using Xthal_release_* global variables). This - * distinction is particularly relevant for vendors that distribute - * configuration-independent binaries (eg. an OS), where their customer - * might link it with a HAL of a different Xtensa software version. - * In this case, it may be appropriate for the OS to verify at run-time - * whether XTHAL_RELEASE_* and Xthal_release_* are compatible. - * [Guidelines as to which version is compatible with which are not - * currently provided explicitly, but might be inferred from reading - * OSKit documentation for all releases -- compatibility is also highly - * dependent on which HAL features are used. Each version is usually - * backward compatible, with very few exceptions if any.] - */ - -/* Version comparison operators (among major/minor pairs): */ -#define XTHAL_REL_GE(maja,mina, majb,minb) ((maja) > (majb) || \ - ((maja) == (majb) && (mina) >= (minb))) -#define XTHAL_REL_GT(maja,mina, majb,minb) ((maja) > (majb) || \ - ((maja) == (majb) && (mina) > (minb))) -#define XTHAL_REL_LE(maja,mina, majb,minb) ((maja) < (majb) || \ - ((maja) == (majb) && (mina) <= (minb))) -#define XTHAL_REL_LT(maja,mina, majb,minb) ((maja) < (majb) || \ - ((maja) == (majb) && (mina) < (minb))) -#define XTHAL_REL_EQ(maja,mina, majb,minb) ((maja) == (majb) && (mina) == (minb)) - -/* Fuzzy (3-way) logic operators: */ -#define XTHAL_MAYBE -1 /* 0=NO, 1=YES, -1=MAYBE */ -#define XTHAL_FUZZY_AND(a,b) (((a)==0 || (b)==0) ? 0 : ((a)==1 && (b)==1) ? 
1 : XTHAL_MAYBE) -#define XTHAL_FUZZY_OR(a,b) (((a)==1 || (b)==1) ? 1 : ((a)==0 && (b)==0) ? 0 : XTHAL_MAYBE) -#define XTHAL_FUZZY_NOT(a) (((a)==0 || (a)==1) ? (1-(a)) : XTHAL_MAYBE) - - -/* - * Architectural limit, independent of configuration: - */ -#define XTHAL_MAX_CPS 8 /* max number of coprocessors (0..7) */ - -/* Misc: */ -#define XTHAL_LITTLEENDIAN 0 -#define XTHAL_BIGENDIAN 1 - - - -#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) -#ifdef __cplusplus -extern "C" { -#endif - -/*---------------------------------------------------------------------- - HAL - ----------------------------------------------------------------------*/ - -/* Constant to be checked in build = (XTHAL_MAJOR_REV<<16)|XTHAL_MINOR_REV */ -extern const unsigned int Xthal_rev_no; - - -/*---------------------------------------------------------------------- - Optional/Custom Processor State - ----------------------------------------------------------------------*/ - -/* save & restore the extra processor state */ -extern void xthal_save_extra(void *base); -extern void xthal_restore_extra(void *base); - -extern void xthal_save_cpregs(void *base, int); -extern void xthal_restore_cpregs(void *base, int); -/* versions specific to each coprocessor id */ -extern void xthal_save_cp0(void *base); -extern void xthal_save_cp1(void *base); -extern void xthal_save_cp2(void *base); -extern void xthal_save_cp3(void *base); -extern void xthal_save_cp4(void *base); -extern void xthal_save_cp5(void *base); -extern void xthal_save_cp6(void *base); -extern void xthal_save_cp7(void *base); -extern void xthal_restore_cp0(void *base); -extern void xthal_restore_cp1(void *base); -extern void xthal_restore_cp2(void *base); -extern void xthal_restore_cp3(void *base); -extern void xthal_restore_cp4(void *base); -extern void xthal_restore_cp5(void *base); -extern void xthal_restore_cp6(void *base); -extern void xthal_restore_cp7(void *base); -/* pointers to each of the functions above */ -extern void* Xthal_cpregs_save_fn[XTHAL_MAX_CPS]; -extern void* Xthal_cpregs_restore_fn[XTHAL_MAX_CPS]; -/* similarly for non-windowed ABI (may be same or different) */ -extern void* Xthal_cpregs_save_nw_fn[XTHAL_MAX_CPS]; -extern void* Xthal_cpregs_restore_nw_fn[XTHAL_MAX_CPS]; - -/*extern void xthal_save_all_extra(void *base);*/ -/*extern void xthal_restore_all_extra(void *base);*/ - -/* space for processor state */ -extern const unsigned int Xthal_extra_size; -extern const unsigned int Xthal_extra_align; -extern const unsigned int Xthal_cpregs_size[XTHAL_MAX_CPS]; -extern const unsigned int Xthal_cpregs_align[XTHAL_MAX_CPS]; -extern const unsigned int Xthal_all_extra_size; -extern const unsigned int Xthal_all_extra_align; -/* coprocessor names */ -extern const char * const Xthal_cp_names[XTHAL_MAX_CPS]; - -/* initialize the extra processor */ -/*extern void xthal_init_extra(void);*/ -/* initialize the TIE coprocessor */ -/*extern void xthal_init_cp(int);*/ - -/* initialize the extra processor */ -extern void xthal_init_mem_extra(void *); -/* initialize the TIE coprocessor */ -extern void xthal_init_mem_cp(void *, int); - -/* the number of TIE coprocessors contiguous from zero (for Tor2) */ -extern const unsigned int Xthal_num_coprocessors; - -/* actual number of coprocessors */ -extern const unsigned char Xthal_cp_num; -/* index of highest numbered coprocessor, plus one */ -extern const unsigned char Xthal_cp_max; -/* index of highest allowed coprocessor number, per cfg, plus one */ -/*extern const unsigned char 
Xthal_cp_maxcfg;*/ -/* bitmask of which coprocessors are present */ -extern const unsigned int Xthal_cp_mask; - -/* read & write extra state register */ -/*extern int xthal_read_extra(void *base, unsigned reg, unsigned *value);*/ -/*extern int xthal_write_extra(void *base, unsigned reg, unsigned value);*/ - -/* read & write a TIE coprocessor register */ -/*extern int xthal_read_cpreg(void *base, int cp, unsigned reg, unsigned *value);*/ -/*extern int xthal_write_cpreg(void *base, int cp, unsigned reg, unsigned value);*/ - -/* return coprocessor number based on register */ -/*extern int xthal_which_cp(unsigned reg);*/ - - -/*---------------------------------------------------------------------- - Register Windows - ----------------------------------------------------------------------*/ - -/* number of registers in register window */ -extern const unsigned int Xthal_num_aregs; -extern const unsigned char Xthal_num_aregs_log2; - - -/*---------------------------------------------------------------------- - Cache - ----------------------------------------------------------------------*/ - -/* size of the cache lines in log2(bytes) */ -extern const unsigned char Xthal_icache_linewidth; -extern const unsigned char Xthal_dcache_linewidth; -/* size of the cache lines in bytes (2^linewidth) */ -extern const unsigned short Xthal_icache_linesize; -extern const unsigned short Xthal_dcache_linesize; - -/* size of the caches in bytes (ways * 2^(linewidth + setwidth)) */ -extern const unsigned int Xthal_icache_size; -extern const unsigned int Xthal_dcache_size; -/* cache features */ -extern const unsigned char Xthal_dcache_is_writeback; - -/* cache region operations*/ -extern void xthal_icache_region_invalidate( void *addr, unsigned size ); -extern void xthal_dcache_region_invalidate( void *addr, unsigned size ); -extern void xthal_dcache_region_writeback( void *addr, unsigned size ); -extern void xthal_dcache_region_writeback_inv( void *addr, unsigned size ); - -#ifndef XTHAL_USE_CACHE_MACROS -/* cache line operations*/ -extern void xthal_icache_line_invalidate(void *addr); -extern void xthal_dcache_line_invalidate(void *addr); -extern void xthal_dcache_line_writeback(void *addr); -extern void xthal_dcache_line_writeback_inv(void *addr); -/* sync icache and memory */ -extern void xthal_icache_sync( void ); -/* sync dcache and memory */ -extern void xthal_dcache_sync( void ); -#endif - -/* get/set number of icache ways enabled */ -extern unsigned int xthal_icache_get_ways(void); -extern void xthal_icache_set_ways(unsigned int ways); -/* get/set number of dcache ways enabled */ -extern unsigned int xthal_dcache_get_ways(void); -extern void xthal_dcache_set_ways(unsigned int ways); - -/* coherency (low-level -- not normally called directly) */ -extern void xthal_cache_coherence_on( void ); -extern void xthal_cache_coherence_off( void ); -/* coherency (high-level) */ -extern void xthal_cache_coherence_optin( void ); -extern void xthal_cache_coherence_optout( void ); - -/* - * Cache prefetch control. - * The parameter to xthal_set_cache_prefetch() contains both - * a PREFCTL register value and a mask of which bits to actually modify. - * This allows easily combining field macros (below) by ORing, - * leaving unspecified fields unmodified. 
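A one-call usage sketch (not part of the deleted header) of combining the field macros defined just below by ORing, as the description above suggests; the function name is illustrative.

    static void enable_default_prefetch(void)
    {
        /* Medium data-side plus low instruction-side prefetch in one call,
         * leaving every other PREFCTL field unmodified. */
        xthal_set_cache_prefetch(XTHAL_DCACHE_PREFETCH_MEDIUM |
                                 XTHAL_ICACHE_PREFETCH_LOW);
    }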
- * - * For backward compatibility with the older version of this routine - * (that took 15-bit value and mask in a 32-bit parameter, for pre-RF - * cores with only the lower 15 bits of PREFCTL defined), the 32-bit - * value and mask are staggered as follows in a 64-bit parameter: - * param[63:48] are PREFCTL[31:16] if param[31] is set - * param[47:32] are mask[31:16] if param[31] is set - * param[31] is set if mask is used, 0 if not - * param[31:16] are mask[15:0] if param[31] is set - * param[31:16] are PREFCTL[31:16] if param[31] is clear - * param[15:0] are PREFCTL[15:0] - * - * Limitation: PREFCTL register bit 31 cannot be set without masking, - * and bit 15 must always be set when using masking, so it is hoped that - * these two bits will remain reserved, read-as-zero in PREFCTL. - */ -#define XTHAL_PREFETCH_ENABLE -1 /* enable inst+data prefetch */ -#define XTHAL_PREFETCH_DISABLE 0xFFFF0000 /* disab inst+data prefetch*/ -#define XTHAL_DCACHE_PREFETCH(n) (0x800F0000+((n)&0xF)) /* data-side */ -#define XTHAL_DCACHE_PREFETCH_OFF XTHAL_DCACHE_PREFETCH(0) /* disable */ -#define XTHAL_DCACHE_PREFETCH_LOW XTHAL_DCACHE_PREFETCH(4) /* less aggr.*/ -#define XTHAL_DCACHE_PREFETCH_MEDIUM XTHAL_DCACHE_PREFETCH(5) /* mid aggr. */ -#define XTHAL_DCACHE_PREFETCH_HIGH XTHAL_DCACHE_PREFETCH(8) /* more aggr.*/ -#define XTHAL_DCACHE_PREFETCH_L1_OFF 0x90000000 /* to prefetch buffers*/ -#define XTHAL_DCACHE_PREFETCH_L1 0x90001000 /* direct to L1 dcache*/ -#define XTHAL_ICACHE_PREFETCH(n) (0x80F00000+(((n)&0xF)<<4)) /* i-side */ -#define XTHAL_ICACHE_PREFETCH_OFF XTHAL_ICACHE_PREFETCH(0) /* disable */ -#define XTHAL_ICACHE_PREFETCH_LOW XTHAL_ICACHE_PREFETCH(4) /* less aggr.*/ -#define XTHAL_ICACHE_PREFETCH_MEDIUM XTHAL_ICACHE_PREFETCH(5) /* mid aggr. */ -#define XTHAL_ICACHE_PREFETCH_HIGH XTHAL_ICACHE_PREFETCH(8) /* more aggr.*/ -#define XTHAL_ICACHE_PREFETCH_L1_OFF 0xA0000000 /* (not implemented) */ -#define XTHAL_ICACHE_PREFETCH_L1 0xA0002000 /* (not implemented) */ -#define _XTHAL_PREFETCH_BLOCKS(n) ((n)<0?0:(n)<5?(n):(n)<15?((n)>>1)+2:9) -#define XTHAL_PREFETCH_BLOCKS(n) (0x0000000F80000000ULL + \ - (((unsigned long long)_XTHAL_PREFETCH_BLOCKS(n))<<48)) - -extern int xthal_get_cache_prefetch( void ); -extern int xthal_set_cache_prefetch( int ); -extern int xthal_set_cache_prefetch_long( unsigned long long ); -/* Only use the new extended function from now on: */ -#define xthal_set_cache_prefetch xthal_set_cache_prefetch_long -#define xthal_set_cache_prefetch_nw xthal_set_cache_prefetch_long_nw - - -/*---------------------------------------------------------------------- - Debug - ----------------------------------------------------------------------*/ - -/* 1 if debug option configured, 0 if not: */ -extern const int Xthal_debug_configured; - -/* Set (plant) and remove software breakpoint, both synchronizing cache: */ -extern unsigned int xthal_set_soft_break(void *addr); -extern void xthal_remove_soft_break(void *addr, unsigned int); - - -/*---------------------------------------------------------------------- - Disassembler - ----------------------------------------------------------------------*/ - -/* Max expected size of the return buffer for a disassembled instruction (hint only): */ -#define XTHAL_DISASM_BUFSIZE 80 - -/* Disassembly option bits for selecting what to return: */ -#define XTHAL_DISASM_OPT_ADDR 0x0001 /* display address */ -#define XTHAL_DISASM_OPT_OPHEX 0x0002 /* display opcode bytes in hex */ -#define XTHAL_DISASM_OPT_OPCODE 0x0004 /* display opcode name (mnemonic) */ -#define 
XTHAL_DISASM_OPT_PARMS 0x0008 /* display parameters */ -#define XTHAL_DISASM_OPT_ALL 0x0FFF /* display everything */ - -/* routine to get a string for the disassembled instruction */ -extern int xthal_disassemble( unsigned char *instr_buf, void *tgt_addr, - char *buffer, unsigned buflen, unsigned options ); - -/* routine to get the size of the next instruction. Returns 0 for - illegal instruction */ -extern int xthal_disassemble_size( unsigned char *instr_buf ); - - -/*---------------------------------------------------------------------- - Instruction/Data RAM/ROM Access - ----------------------------------------------------------------------*/ - -extern void* xthal_memcpy(void *dst, const void *src, unsigned len); -extern void* xthal_bcopy(const void *src, void *dst, unsigned len); - - -/*---------------------------------------------------------------------- - MP Synchronization - ----------------------------------------------------------------------*/ - -extern int xthal_compare_and_set( int *addr, int test_val, int compare_val ); - -/*extern const char Xthal_have_s32c1i;*/ - - -/*---------------------------------------------------------------------- - Miscellaneous - ----------------------------------------------------------------------*/ - -extern const unsigned int Xthal_release_major; -extern const unsigned int Xthal_release_minor; -extern const char * const Xthal_release_name; -extern const char * const Xthal_release_internal; - -extern const unsigned char Xthal_memory_order; -extern const unsigned char Xthal_have_windowed; -extern const unsigned char Xthal_have_density; -extern const unsigned char Xthal_have_booleans; -extern const unsigned char Xthal_have_loops; -extern const unsigned char Xthal_have_nsa; -extern const unsigned char Xthal_have_minmax; -extern const unsigned char Xthal_have_sext; -extern const unsigned char Xthal_have_clamps; -extern const unsigned char Xthal_have_mac16; -extern const unsigned char Xthal_have_mul16; -extern const unsigned char Xthal_have_fp; -extern const unsigned char Xthal_have_speculation; -extern const unsigned char Xthal_have_threadptr; - -extern const unsigned char Xthal_have_pif; -extern const unsigned short Xthal_num_writebuffer_entries; - -extern const unsigned int Xthal_build_unique_id; -/* Version info for hardware targeted by software upgrades: */ -extern const unsigned int Xthal_hw_configid0; -extern const unsigned int Xthal_hw_configid1; -extern const unsigned int Xthal_hw_release_major; -extern const unsigned int Xthal_hw_release_minor; -extern const char * const Xthal_hw_release_name; -extern const char * const Xthal_hw_release_internal; - -/* Clear any remnant code-dependent state (i.e. clear loop count regs). */ -extern void xthal_clear_regcached_code( void ); - -#ifdef __cplusplus -} -#endif -#endif /*!_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */ - - - - - -/**************************************************************************** - Definitions Useful for PRIVILEGED (Supervisory or Non-Virtualized) Code - ****************************************************************************/ - - -#ifndef XTENSA_HAL_NON_PRIVILEGED_ONLY - -/*---------------------------------------------------------------------- - Constant Definitions (shared with assembly) - ----------------------------------------------------------------------*/ - -/* - * Architectural limits, independent of configuration. 
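An illustrative sketch (not from the deleted header) of the usual way xthal_compare_and_set(), declared above, is used: a trivial spinlock. The helper names are hypothetical, and on cores without S32C1I the fallback only masks local interrupts, so this is not safe across multiple cores there.

    /* xthal_compare_and_set(addr, testval, setval) atomically stores setval to
     * *addr when *addr == testval and returns the value *addr held beforehand. */
    static void spin_lock(volatile int *lock)
    {
        while (xthal_compare_and_set((int *)lock, 0, 1) != 0)
            ;   /* retry until the previous value was 0, i.e. the lock was free */
    }

    static void spin_unlock(volatile int *lock)
    {
        *lock = 0;   /* a plain store releases the lock */
    }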
- * Note that these are ISA-defined limits, not micro-architecture implementation - * limits enforced by the Xtensa Processor Generator (which may be stricter than - * these below). - */ -#define XTHAL_MAX_INTERRUPTS 32 /* max number of interrupts (0..31) */ -#define XTHAL_MAX_INTLEVELS 16 /* max number of interrupt levels (0..15) */ - /* (as of T1040, implementation limit is 7: 0..6) */ -#define XTHAL_MAX_TIMERS 4 /* max number of timers (CCOMPARE0..CCOMPARE3) */ - /* (as of T1040, implementation limit is 3: 0..2) */ - -/* Interrupt types: */ -#define XTHAL_INTTYPE_UNCONFIGURED 0 -#define XTHAL_INTTYPE_SOFTWARE 1 -#define XTHAL_INTTYPE_EXTERN_EDGE 2 -#define XTHAL_INTTYPE_EXTERN_LEVEL 3 -#define XTHAL_INTTYPE_TIMER 4 -#define XTHAL_INTTYPE_NMI 5 -#define XTHAL_INTTYPE_WRITE_ERROR 6 -#define XTHAL_INTTYPE_PROFILING 7 -#define XTHAL_INTTYPE_IDMA_DONE 8 -#define XTHAL_INTTYPE_IDMA_ERR 9 -#define XTHAL_INTTYPE_GS_ERR 10 -#define XTHAL_INTTYPE_SG_ERR 10 /* backward compatibility name - deprecated */ -#define XTHAL_MAX_INTTYPES 11 /* number of interrupt types */ - -/* Timer related: */ -#define XTHAL_TIMER_UNCONFIGURED -1 /* Xthal_timer_interrupt[] value for non-existent timers */ -#define XTHAL_TIMER_UNASSIGNED XTHAL_TIMER_UNCONFIGURED /* (for backwards compatibility only) */ - -/* Local Memory ECC/Parity: */ -#define XTHAL_MEMEP_PARITY 1 -#define XTHAL_MEMEP_ECC 2 -/* Flags parameter to xthal_memep_inject_error(): */ -#define XTHAL_MEMEP_F_LOCAL 0 /* local memory (default) */ -#define XTHAL_MEMEP_F_DCACHE_DATA 4 /* data cache data */ -#define XTHAL_MEMEP_F_DCACHE_TAG 5 /* data cache tag */ -#define XTHAL_MEMEP_F_ICACHE_DATA 6 /* instruction cache data */ -#define XTHAL_MEMEP_F_ICACHE_TAG 7 /* instruction cache tag */ -#define XTHAL_MEMEP_F_CORRECTABLE 16 /* inject correctable error - (default is non-corr.) */ - - -/* Access Mode bits (tentative): */ /* bit abbr unit short_name PPC equ - Description */ -#define XTHAL_AMB_EXCEPTION 0 /* 001 E EX fls: EXception none - exception on any access (aka "illegal") */ -#define XTHAL_AMB_HITCACHE 1 /* 002 C CH fls: use Cache on Hit ~(I CI) - [or H HC] way from tag match; - [or U UC] (ISA: same except Isolate case) */ -#define XTHAL_AMB_ALLOCATE 2 /* 004 A AL fl?: ALlocate none - [or F FI fill] refill cache on miss, way from LRU - (ISA: Read/Write Miss Refill) */ -#define XTHAL_AMB_WRITETHRU 3 /* 008 W WT --s: WriteThrough W WT - store immediately to memory (ISA: same) */ -#define XTHAL_AMB_ISOLATE 4 /* 010 I IS fls: ISolate none - use cache regardless of hit-vs-miss, - way from vaddr (ISA: use-cache-on-miss+hit) */ -#define XTHAL_AMB_GUARD 5 /* 020 G GU ?l?: GUard G * - non-speculative; spec/replay refs not permitted */ -#define XTHAL_AMB_COHERENT 6 /* 040 M MC ?ls: Mem/MP Coherent M - on read, other CPU/bus-master may need to supply data; - on write, maybe redirect to or flush other CPU dirty line; etc */ -#if 0 -#define XTHAL_AMB_BUFFERABLE x /* 000 B BU --s: BUfferable ? 
- write response may return earlier than from final destination */ -#define XTHAL_AMB_ORDERED x /* 000 O OR fls: ORdered G * - mem accesses cannot be out of order */ -#define XTHAL_AMB_FUSEWRITES x /* 000 F FW --s: FuseWrites none - allow combining/merging/coalescing multiple writes - (to same datapath data unit) into one - (implied by writeback) */ -#define XTHAL_AMB_TRUSTED x /* 000 T TR ?l?: TRusted none - memory will not bus error (if it does, - handle as fatal imprecise interrupt) */ -#define XTHAL_AMB_PREFETCH x /* 000 P PR fl?: PRefetch none - on refill, read line+1 into prefetch buffers */ -#define XTHAL_AMB_STREAM x /* 000 S ST ???: STreaming none - access one of N stream buffers */ -#endif /*0*/ - -#define XTHAL_AM_EXCEPTION (1< = bit is set - * '-' = bit is clear - * '.' = bit is irrelevant / don't care, as follows: - * E=1 makes all others irrelevant - * W,F relevant only for stores - * "2345" - * Indicates which Xtensa releases support the corresponding - * access mode. Releases for each character column are: - * 2 = prior to T1020.2: T1015 (V1.5), T1020.0, T1020.1 - * 3 = T1020.2 and later: T1020.2+, T1030 - * 4 = T1040 - * 5 = T1050 (maybe), LX1, LX2, LX2.1 - * 7 = LX2.2 - * 8 = LX3, LX4 - * 9 = LX5 - * And the character column contents are: - * = supported by release(s) - * "." = unsupported by release(s) - * "?" = support unknown - */ - /* foMGIWACE 2345789 */ -/* For instruction fetch: */ -#define XTHAL_FAM_EXCEPTION 0x001 /* ........E 2345789 exception */ -/*efine XTHAL_FAM_ISOLATE*/ /*0x012*/ /* .---I.-C- ....... isolate */ -#define XTHAL_FAM_BYPASS 0x000 /* .----.--- 2345789 bypass */ -/*efine XTHAL_FAM_NACACHED*/ /*0x002*/ /* .----.-C- ....... cached no-allocate (frozen) */ -#define XTHAL_FAM_CACHED 0x006 /* .----.AC- 2345789 cached */ -/* For data load: */ -#define XTHAL_LAM_EXCEPTION 0x001 /* ........E 2345789 exception */ -#define XTHAL_LAM_ISOLATE 0x012 /* .---I.-C- 2345789 isolate */ -#define XTHAL_LAM_BYPASS 0x000 /* .O---.--- 2...... bypass speculative */ -#define XTHAL_LAM_BYPASSG 0x020 /* .O-G-.--- .345789 bypass guarded */ -#define XTHAL_LAM_CACHED_NOALLOC 0x002 /* .O---.-C- 2345789 cached no-allocate speculative */ -#define XTHAL_LAM_NACACHED XTHAL_LAM_CACHED_NOALLOC -#define XTHAL_LAM_NACACHEDG 0x022 /* .O-G-.-C- .?..... cached no-allocate guarded */ -#define XTHAL_LAM_CACHED 0x006 /* .----.AC- 2345789 cached speculative */ -#define XTHAL_LAM_COHCACHED 0x046 /* .-M--.AC- ....*89 cached speculative MP-coherent */ -/* For data store: */ -#define XTHAL_SAM_EXCEPTION 0x001 /* ........E 2345789 exception */ -#define XTHAL_SAM_ISOLATE 0x032 /* .--GI--C- 2345789 isolate */ -#define XTHAL_SAM_BYPASS 0x028 /* -O-G-W--- 2345789 bypass */ -#define XTHAL_SAM_WRITETHRU 0x02A /* -O-G-W-C- 2345789 writethrough */ -/*efine XTHAL_SAM_WRITETHRU_ALLOC*/ /*0x02E*/ /* -O-G-WAC- ....... 
writethrough allocate */ -#define XTHAL_SAM_WRITEBACK 0x026 /* F--G--AC- ...5789 writeback */ -#define XTHAL_SAM_WRITEBACK_NOALLOC 0x022 /* ?--G---C- .....89 writeback no-allocate */ -#define XTHAL_SAM_COHWRITEBACK 0x066 /* F-MG--AC- ....*89 writeback MP-coherent */ -/* For PIF attributes: */ /* -PIwrWCBUUUU ...9 */ -#define XTHAL_PAM_BYPASS 0x000 /* xxx00000xxxx ...9 bypass non-bufferable */ -#define XTHAL_PAM_BYPASS_BUF 0x010 /* xxx0000bxxxx ...9 bypass */ -#define XTHAL_PAM_CACHED_NOALLOC 0x030 /* xxx0001bxxxx ...9 cached no-allocate */ -#define XTHAL_PAM_WRITETHRU 0x0B0 /* xxx0101bxxxx ...9 writethrough (WT) */ -#define XTHAL_PAM_WRITEBACK_NOALLOC 0x0F0 /* xxx0111bxxxx ...9 writeback no-alloc (WBNA) */ -#define XTHAL_PAM_WRITEBACK 0x1F0 /* xxx1111bxxxx ...9 writeback (WB) */ -/*efine XTHAL_PAM_NORMAL*/ /*0x050*/ /* xxx0010bxxxx .... (unimplemented) */ -/*efine XTHAL_PAM_WRITETHRU_WA*/ /*0x130*/ /* xxx1001bxxxx .... (unimplemented, less likely) */ -/*efine XTHAL_PAM_WRITETHRU_RWA*/ /*0x1B0*/ /* xxx1101bxxxx .... (unimplemented, less likely) */ -/*efine XTHAL_PAM_WRITEBACK_WA*/ /*0x170*/ /* xxx1011bxxxx .... (unimplemented, less likely) */ - - -#if 0 -/* - Cache attribute encoding for CACHEATTR (per ISA): - (Note: if this differs from ISA Ref Manual, ISA has precedence) - - Inst-fetches Loads Stores - ------------- ------------ ------------- -0x0 FCA_EXCEPTION LCA_NACACHED SCA_WRITETHRU cached no-allocate (previously misnamed "uncached") -0x1 FCA_CACHED LCA_CACHED SCA_WRITETHRU cached -0x2 FCA_BYPASS LCA_BYPASS_G* SCA_BYPASS bypass cache (what most people call uncached) -0x3 FCA_CACHED LCA_CACHED SCA_WRITEALLOCF write-allocate - or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) -0x4 FCA_CACHED LCA_CACHED SCA_WRITEBACK[M] write-back [MP-coherent] - or LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) -0x5 FCA_CACHED LCA_CACHED SCA_WRITEBACK_NOALLOC write-back no-allocate - or FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (if unimplemented) -0x6..D FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION (reserved) -0xE FCA_EXCEPTION LCA_ISOLATE SCA_ISOLATE isolate -0xF FCA_EXCEPTION LCA_EXCEPTION SCA_EXCEPTION illegal - * Prior to T1020.2?, guard feature not supported, this defaulted to speculative (no _G) -*/ -#endif /*0*/ - - -#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) -#ifdef __cplusplus -extern "C" { -#endif - - -/*---------------------------------------------------------------------- - Register Windows - ----------------------------------------------------------------------*/ - -/* This spill any live register windows (other than the caller's): - * (NOTE: current implementation require privileged code, but - * a user-callable implementation is possible.) 
*/ -extern void xthal_window_spill( void ); - - -/*---------------------------------------------------------------------- - Optional/Custom Processor State - ----------------------------------------------------------------------*/ - -/* validate & invalidate the TIE register file */ -extern void xthal_validate_cp(int); -extern void xthal_invalidate_cp(int); - -/* read and write cpenable register */ -extern void xthal_set_cpenable(unsigned); -extern unsigned xthal_get_cpenable(void); - - -/*---------------------------------------------------------------------- - Interrupts - ----------------------------------------------------------------------*/ - -/* the number of interrupt levels */ -extern const unsigned char Xthal_num_intlevels; -/* the number of interrupts */ -extern const unsigned char Xthal_num_interrupts; -/* the highest level of interrupts masked by PS.EXCM */ -extern const unsigned char Xthal_excm_level; - -/* mask for level of interrupts */ -extern const unsigned int Xthal_intlevel_mask[XTHAL_MAX_INTLEVELS]; -/* mask for level 0 to N interrupts */ -extern const unsigned int Xthal_intlevel_andbelow_mask[XTHAL_MAX_INTLEVELS]; - -/* level of each interrupt */ -extern const unsigned char Xthal_intlevel[XTHAL_MAX_INTERRUPTS]; - -/* type per interrupt */ -extern const unsigned char Xthal_inttype[XTHAL_MAX_INTERRUPTS]; - -/* masks of each type of interrupt */ -extern const unsigned int Xthal_inttype_mask[XTHAL_MAX_INTTYPES]; - -/* interrupt numbers assigned to each timer interrupt */ -extern const int Xthal_timer_interrupt[XTHAL_MAX_TIMERS]; - -/* INTENABLE,INTERRUPT,INTSET,INTCLEAR register access functions: */ -extern unsigned xthal_get_intenable( void ); -extern void xthal_set_intenable( unsigned ); -extern unsigned xthal_get_interrupt( void ); -#define xthal_get_intread xthal_get_interrupt /* backward compatibility */ - -/* These two functions are deprecated. Use the newer functions - xthal_interrupt_trigger and xthal_interrupt_clear instead. 
*/ -extern void xthal_set_intset( unsigned ); -extern void xthal_set_intclear( unsigned ); - - -/*---------------------------------------------------------------------- - Debug - ----------------------------------------------------------------------*/ - -/* Number of instruction and data break registers: */ -extern const int Xthal_num_ibreak; -extern const int Xthal_num_dbreak; - - -/*---------------------------------------------------------------------- - Core Counter - ----------------------------------------------------------------------*/ - -/* counter info */ -extern const unsigned char Xthal_have_ccount; /* set if CCOUNT register present */ -extern const unsigned char Xthal_num_ccompare; /* number of CCOMPAREn registers */ - -/* get CCOUNT register (if not present return 0) */ -extern unsigned xthal_get_ccount(void); - -/* set and get CCOMPAREn registers (if not present, get returns 0) */ -extern void xthal_set_ccompare(int, unsigned); -extern unsigned xthal_get_ccompare(int); - - -/*---------------------------------------------------------------------- - Miscellaneous - ----------------------------------------------------------------------*/ - -extern const unsigned char Xthal_have_prid; -extern const unsigned char Xthal_have_exceptions; -extern const unsigned char Xthal_xea_version; -extern const unsigned char Xthal_have_interrupts; -extern const unsigned char Xthal_have_highlevel_interrupts; -extern const unsigned char Xthal_have_nmi; - -extern unsigned xthal_get_prid( void ); - - -/*---------------------------------------------------------------------- - Virtual interrupt prioritization (DEPRECATED) - ----------------------------------------------------------------------*/ - -/* Convert between interrupt levels (as per PS.INTLEVEL) and virtual interrupt priorities: */ -extern unsigned xthal_vpri_to_intlevel(unsigned vpri); -extern unsigned xthal_intlevel_to_vpri(unsigned intlevel); - -/* Enables/disables given set (mask) of interrupts; returns previous enabled-mask of all ints: */ -/* These functions are deprecated. Use xthal_interrupt_enable and xthal_interrupt_disable instead. 
*/ -extern unsigned xthal_int_enable(unsigned); -extern unsigned xthal_int_disable(unsigned); - -/* Set/get virtual priority of an interrupt: */ -extern int xthal_set_int_vpri(int intnum, int vpri); -extern int xthal_get_int_vpri(int intnum); - -/* Set/get interrupt lockout level for exclusive access to virtual priority data structures: */ -extern void xthal_set_vpri_locklevel(unsigned intlevel); -extern unsigned xthal_get_vpri_locklevel(void); - -/* Set/get current virtual interrupt priority: */ -extern unsigned xthal_set_vpri(unsigned vpri); -extern unsigned xthal_get_vpri(void); -extern unsigned xthal_set_vpri_intlevel(unsigned intlevel); -extern unsigned xthal_set_vpri_lock(void); - - -/*---------------------------------------------------------------------- - Generic Interrupt Trampolining Support (DEPRECATED) - ----------------------------------------------------------------------*/ - -typedef void (XtHalVoidFunc)(void); - -/* Bitmask of interrupts currently trampolining down: */ -extern unsigned Xthal_tram_pending; - -/* - * Bitmask of which interrupts currently trampolining down synchronously are - * actually enabled; this bitmask is necessary because INTENABLE cannot hold - * that state (sync-trampolining interrupts must be kept disabled while - * trampolining); in the current implementation, any bit set here is not set - * in INTENABLE, and vice-versa; once a sync-trampoline is handled (at level - * one), its enable bit must be moved from here to INTENABLE: - */ -extern unsigned Xthal_tram_enabled; - -/* Bitmask of interrupts configured for sync trampolining: */ -extern unsigned Xthal_tram_sync; - -/* Trampoline support functions: */ -extern unsigned xthal_tram_pending_to_service( void ); -extern void xthal_tram_done( unsigned serviced_mask ); -extern int xthal_tram_set_sync( int intnum, int sync ); -extern XtHalVoidFunc* xthal_set_tram_trigger_func( XtHalVoidFunc *trigger_fn ); - - -/*---------------------------------------------------------------------- - Internal Memories - ----------------------------------------------------------------------*/ - -extern const unsigned char Xthal_num_instrom; -extern const unsigned char Xthal_num_instram; -extern const unsigned char Xthal_num_datarom; -extern const unsigned char Xthal_num_dataram; -extern const unsigned char Xthal_num_xlmi; - -/* Each of the following arrays contains at least one entry, - * or as many entries as needed if more than one: */ -extern const unsigned int Xthal_instrom_vaddr[]; -extern const unsigned int Xthal_instrom_paddr[]; -extern const unsigned int Xthal_instrom_size []; -extern const unsigned int Xthal_instram_vaddr[]; -extern const unsigned int Xthal_instram_paddr[]; -extern const unsigned int Xthal_instram_size []; -extern const unsigned int Xthal_datarom_vaddr[]; -extern const unsigned int Xthal_datarom_paddr[]; -extern const unsigned int Xthal_datarom_size []; -extern const unsigned int Xthal_dataram_vaddr[]; -extern const unsigned int Xthal_dataram_paddr[]; -extern const unsigned int Xthal_dataram_size []; -extern const unsigned int Xthal_xlmi_vaddr[]; -extern const unsigned int Xthal_xlmi_paddr[]; -extern const unsigned int Xthal_xlmi_size []; - - -/*---------------------------------------------------------------------- - Cache - ----------------------------------------------------------------------*/ - -/* number of cache sets in log2(lines per way) */ -extern const unsigned char Xthal_icache_setwidth; -extern const unsigned char Xthal_dcache_setwidth; -/* cache set associativity (number of ways) */ 
-extern const unsigned int Xthal_icache_ways; -extern const unsigned int Xthal_dcache_ways; -/* cache features */ -extern const unsigned char Xthal_icache_line_lockable; -extern const unsigned char Xthal_dcache_line_lockable; - -/* cache attribute register control (used by other HAL routines) */ -extern unsigned xthal_get_cacheattr( void ); -extern unsigned xthal_get_icacheattr( void ); -extern unsigned xthal_get_dcacheattr( void ); -extern void xthal_set_cacheattr( unsigned ); -extern void xthal_set_icacheattr( unsigned ); -extern void xthal_set_dcacheattr( unsigned ); -/* set cache attribute (access modes) for a range of memory */ -extern int xthal_set_region_attribute( void *addr, unsigned size, - unsigned cattr, unsigned flags ); -/* Bits of flags parameter to xthal_set_region_attribute(): */ -#define XTHAL_CAFLAG_EXPAND 0x000100 /* only expand allowed access to range, don't reduce it */ -#define XTHAL_CAFLAG_EXACT 0x000200 /* return error if can't apply change to exact range specified */ -#define XTHAL_CAFLAG_NO_PARTIAL 0x000400 /* don't apply change to regions partially covered by range */ -#define XTHAL_CAFLAG_NO_AUTO_WB 0x000800 /* don't writeback data after leaving writeback attribute */ -#define XTHAL_CAFLAG_NO_AUTO_INV 0x001000 /* don't invalidate after disabling cache (entering bypass) */ - -/* enable caches */ -extern void xthal_icache_enable( void ); /* DEPRECATED */ -extern void xthal_dcache_enable( void ); /* DEPRECATED */ -/* disable caches */ -extern void xthal_icache_disable( void ); /* DEPRECATED */ -extern void xthal_dcache_disable( void ); /* DEPRECATED */ - -/* whole cache operations (privileged) */ -extern void xthal_icache_all_invalidate( void ); -extern void xthal_dcache_all_invalidate( void ); -extern void xthal_dcache_all_writeback( void ); -extern void xthal_dcache_all_writeback_inv( void ); -extern void xthal_icache_all_unlock( void ); -extern void xthal_dcache_all_unlock( void ); - -/* address-range cache operations (privileged) */ -/* prefetch and lock specified memory range into cache */ -extern void xthal_icache_region_lock( void *addr, unsigned size ); -extern void xthal_dcache_region_lock( void *addr, unsigned size ); -/* unlock from cache */ -extern void xthal_icache_region_unlock( void *addr, unsigned size ); -extern void xthal_dcache_region_unlock( void *addr, unsigned size ); - -/* huge-range cache operations (privileged) (EXPERIMENTAL) */ -extern void xthal_icache_hugerange_invalidate( void *addr, unsigned size ); -extern void xthal_icache_hugerange_unlock( void *addr, unsigned size ); -extern void xthal_dcache_hugerange_invalidate( void *addr, unsigned size ); -extern void xthal_dcache_hugerange_unlock( void *addr, unsigned size ); -extern void xthal_dcache_hugerange_writeback( void *addr, unsigned size ); -extern void xthal_dcache_hugerange_writeback_inv( void *addr, unsigned size ); - -# ifndef XTHAL_USE_CACHE_MACROS -/* cache line operations (privileged) */ -extern void xthal_icache_line_lock(void *addr); -extern void xthal_dcache_line_lock(void *addr); -extern void xthal_icache_line_unlock(void *addr); -extern void xthal_dcache_line_unlock(void *addr); -# endif - - - -/*---------------------------------------------------------------------- - Local Memory ECC/Parity - ----------------------------------------------------------------------*/ - -/* Inject memory errors; flags is bit combination of XTHAL_MEMEP_F_xxx: */ -extern void xthal_memep_inject_error(void *addr, int size, int flags); - - - 
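For reference, a minimal sketch of how the region-attribute API declared above is typically called. It is illustrative only: the helper, its buffer, size and attribute choice are assumptions rather than code from this tree, and the XTHAL_MEM_* memory-type constant it uses applies to MPU-based configurations as described further below.

    #include <xtensa/hal.h>

    /* Hypothetical helper: request that exactly [buf, buf+size) become
     * non-cacheable.  The data cache is written back and invalidated first
     * so no stale lines survive the attribute change.  Returns
     * XTHAL_SUCCESS (0) or a negative XTHAL_* error code. */
    static int make_region_uncached(void *buf, unsigned size)
    {
            xthal_dcache_all_writeback_inv();
            return xthal_set_region_attribute(buf, size,
                                              XTHAL_MEM_NON_CACHEABLE,
                                              XTHAL_CAFLAG_EXACT);
    }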
-/*---------------------------------------------------------------------- - Memory Management Unit - ----------------------------------------------------------------------*/ - -extern const unsigned char Xthal_have_spanning_way; -extern const unsigned char Xthal_have_identity_map; -extern const unsigned char Xthal_have_mimic_cacheattr; -extern const unsigned char Xthal_have_xlt_cacheattr; -extern const unsigned char Xthal_have_cacheattr; -extern const unsigned char Xthal_have_tlbs; - -extern const unsigned char Xthal_mmu_asid_bits; /* 0 .. 8 */ -extern const unsigned char Xthal_mmu_asid_kernel; -extern const unsigned char Xthal_mmu_rings; /* 1 .. 4 (perhaps 0 if no MMU and/or no protection?) */ -extern const unsigned char Xthal_mmu_ring_bits; -extern const unsigned char Xthal_mmu_sr_bits; -extern const unsigned char Xthal_mmu_ca_bits; -extern const unsigned int Xthal_mmu_max_pte_page_size; -extern const unsigned int Xthal_mmu_min_pte_page_size; - -extern const unsigned char Xthal_itlb_way_bits; -extern const unsigned char Xthal_itlb_ways; -extern const unsigned char Xthal_itlb_arf_ways; -extern const unsigned char Xthal_dtlb_way_bits; -extern const unsigned char Xthal_dtlb_ways; -extern const unsigned char Xthal_dtlb_arf_ways; - -/* Return error codes for hal functions */ - -/* function successful, operation completed as expected */ -#define XTHAL_SUCCESS 0 -/* XTHAL_CAFLAG_NO_PARTIAL was specified, and no full region is - * covered by the address range. */ -#define XTHAL_NO_REGIONS_COVERED -1 -/* The XTHAL_CAFLAG_EXACT flag was given, but no exact mapping is possible. */ -#define XTHAL_INEXACT -2 -/* The supplied address doesn't correspond to the start of a region. */ -#define XTHAL_INVALID_ADDRESS -3 -/* This functionality is not available on this architecture. */ -#define XTHAL_UNSUPPORTED -4 -/* Translation failed because vaddr and paddr were not aligned. */ -#define XTHAL_ADDRESS_MISALIGNED -5 -/* There is no mapping for the supplied address. */ -#define XTHAL_NO_MAPPING -6 -/* The requested access rights are not supported */ -#define XTHAL_BAD_ACCESS_RIGHTS -7 -/* The requested memory type is not supported */ -#define XTHAL_BAD_MEMORY_TYPE -8 -/* The entries supplied are not properly aligned to the MPU's background map. */ -#define XTHAL_MAP_NOT_ALIGNED -9 -/* There are not enough MPU entries available to do the requested mapping. */ -#define XTHAL_OUT_OF_ENTRIES -10 -/* The entries supplied are not properly ordered for the MPU. */ -#define XTHAL_OUT_OF_ORDER_MAP -11 -/* an invalid argument such as a null pointer was supplied to the function */ -#define XTHAL_INVALID -12 -/* specified region is of zero size, therefore no mapping is done. */ -#define XTHAL_ZERO_SIZED_REGION -13 -/* specified range wraps around '0' */ -#define XTHAL_INVALID_ADDRESS_RANGE -14 - -/* - For backward compatibility we retain the following inconsistently named - constants. Do not use them as they may be removed in a future release. - */ -#define XCHAL_SUCCESS XTHAL_SUCCESS -#define XCHAL_ADDRESS_MISALIGNED XTHAL_ADDRESS_MISALIGNED -#define XCHAL_INEXACT XTHAL_INEXACT -#define XCHAL_INVALID_ADDRESS XTHAL_INVALID_ADDRESS -#define XCHAL_UNSUPPORTED_ON_THIS_ARCH XTHAL_UNSUPPORTED -#define XCHAL_NO_PAGES_MAPPED XTHAL_NO_REGIONS_COVERED - - -/* Convert between virtual and physical addresses (through static maps only) - * WARNING: these two functions may go away in a future release; - * don't depend on them!
-*/ -extern int xthal_static_v2p( unsigned vaddr, unsigned *paddrp ); -extern int xthal_static_p2v( unsigned paddr, unsigned *vaddrp, unsigned cached ); - -extern int xthal_set_region_translation(void* vaddr, void* paddr, - unsigned size, unsigned cache_atr, unsigned flags); -extern int xthal_v2p(void*, void**, unsigned*, unsigned*); -extern int xthal_invalidate_region(void* addr); -extern int xthal_set_region_translation_raw(void *vaddr, void *paddr, unsigned cattr); - -/*------------------------------------------------------------------------ - MPU (Memory Protection Unit) --------------------------------------------------------------------------*/ - -/* - * General notes on MPU (Memory Protection Unit): - * - * The MPU supports setting the access rights (read, write, execute) as - * well as the memory type (cacheability, ...) - * for regions of memory. The granularity can be as small as 32 bytes. - * (XCHAL_MPU_ALIGN specifies the granularity for any specific MPU config) - * - * The MPU doesn't support mapping between virtual and physical addresses. - * - * The MPU contains a fixed number of changeable foreground map entries, - * and a background map which is fixed at configuration time. - * - * Each entry has a start address (up to 27 bits), valid flag, - * access rights (4 bits), and memory type (9 bits); - * - */ - - -/* - MPU access rights constants: - Only the combinations listed below are supported by the MPU. -*/ - -#define XTHAL_AR_NONE 0 /* no access */ -#define XTHAL_AR_R 4 /* Kernel read, User no access*/ -#define XTHAL_AR_RX 5 /* Kernel read/execute, User no access */ -#define XTHAL_AR_RW 6 /* Kernel read/write, User no access */ -#define XTHAL_AR_RWX 7 /* Kernel read/write/execute, User no access */ -#define XTHAL_AR_Ww 8 /* Kernel write, User write */ -#define XTHAL_AR_RWrwx 9 /* Kernel read/write, User read/write/execute */ -#define XTHAL_AR_RWr 10 /* Kernel read/write, User read */ -#define XTHAL_AR_RWXrx 11 /* Kernel read/write/execute, User read/execute */ -#define XTHAL_AR_Rr 12 /* Kernel read, User read */ -#define XTHAL_AR_RXrx 13 /* Kernel read/execute, User read/execute */ -#define XTHAL_AR_RWrw 14 /* Kernel read/write, User read/write */ -#define XTHAL_AR_RWXrwx 15 /* Kernel read/write/execute, - User read/write/execute */ - -#define XTHAL_AR_WIDTH 4 /* # bits used to encode access rights */ - -/* If the bit XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS is set in the accessRights - * argument to xthal_mpu_set_region_attribute(), or to the cattr argument of - * xthal_set_region_attribute() then the existing access rights for the first - * byte of the region will be used as the access rights of the new region. - */ -#define XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS 0x00002000 - -/* If the bit XTHAL_MPU_USE_EXISTING_MEMORY_TYPE is set in the memoryType - * argument to xthal_mpu_set_region_attribute(), or to the cattr argument of - * xthal_set_region_attribute() then the existing memory type for the first - * byte of the region will be used as the memory type of the new region. - */ -#define XTHAL_MPU_USE_EXISTING_MEMORY_TYPE 0x00004000 - -/* The following groups of constants are bit-wise or'd together to specify - * the memory type as input to the macros and functions that accept an - * unencoded memory type specifier: - * XTHAL_ENCODE_MEMORY_TYPE, xthal_encode_memory_type, - * xthal_mpu_set_region_attribute(), and xthal_set_region_attribute().
- * - * example: - * XTHAL_MEM_DEVICE | XTHAL_MEM_INTERRUPTIBLE | XTHAL_MEM_SYSTEM_SHARABLE - * - * or - * XTHAL_MEM_WRITEBACK | XTHAL_MEM_INNER_SHAREABLE - * - * If it is desired to specify different attributes for the system and - * local cache, then macro XTHAL_MEM_PROC_CACHE is used: - * - * XTHAL_MEM_PROC_CACHE(XTHAL_MEM_WRITEBACK, XTHAL_MEM_WRITETHRU) - * - * indicates the shared cache is writeback, but the processor's local cache - * is writethrough. - * - */ - -/* The following group of constants are used to specify cache attributes of - * an MPU entry. If the processors local cache and the system's shared cache - * have the same attributes (or if there aren't distinct local and shared - * caches) then the constant can be used directly. If different attributes - * for the shared and local caches, then use these constants as the parameters - * to the XTHAL_MEM_PROC_CACHE() macro. - */ -#define XTHAL_MEM_DEVICE 0x00008000 -#define XTHAL_MEM_NON_CACHEABLE 0x00090000 -#define XTHAL_MEM_WRITETHRU_NOALLOC 0x00080000 -#define XTHAL_MEM_WRITETHRU 0x00040000 -#define XTHAL_MEM_WRITETHRU_WRITEALLOC 0x00060000 -#define XTHAL_MEM_WRITEBACK_NOALLOC 0x00050000 -#define XTHAL_MEM_WRITEBACK 0x00070000 - -/* Indicates a read is interruptible. Only applicable to devices */ -#define XTHAL_MEM_INTERRUPTIBLE 0x08000000 - -/* Indicates if writes to this memory are bufferable ... only applicable - * to devices, and non-cacheable memory. - */ -#define XTHAL_MEM_BUFFERABLE 0x01000000 - -/* The following group of constants indicates the scope of the sharing of - * the memory region. XTHAL_MEM_INNER_SHAREABLE and XTHAL_MEM_OUTER_SHARABLE are - * only applicable to cacheable regions. XTHAL_MEM_SYSTEM_SHAREABLE is only - * applicable to devices and non-cacheable regions. - */ -#define XTHAL_MEM_NON_SHAREABLE 0x00000000 -#define XTHAL_MEM_INNER_SHAREABLE 0x02000000 -#define XTHAL_MEM_OUTER_SHAREABLE 0x04000000 -#define XTHAL_MEM_SYSTEM_SHAREABLE 0x06000000 - - -/* - * This macro is needed when the cache attributes are different for the shared - * and processor's local caches. For example: - * - * XTHAL_MEM_PROC_CACHE(XTHAL_MEM_WRITEBACK, XTHAL_MEM_NON_CACHEABLE) - * creates a memory type that is writeback cacheable in the system cache, and not - * cacheable in the processor's local cache. - */ -#define XTHAL_MEM_PROC_CACHE(system, processor) \ - (((system) & 0x000f0000) | (((processor) & 0x000f0000 ) << 4) | \ - (((system) & XTHAL_MEM_DEVICE) | ((processor) & XTHAL_MEM_DEVICE))) - -/* - * This macro converts a bit-wise combination of the XTHAL_MEM_... constants - * to the corresponding MPU memory type (9-bits). - * - * Unsupported combinations are mapped to the best available substitute. - * - * The same functionality plus error checking is available from - * xthal_encode_memory_type(). - */ -#define XTHAL_ENCODE_MEMORY_TYPE(x) \ - (((x) & 0xffffe000) ? \ - (_XTHAL_MEM_IS_DEVICE((x)) ? _XTHAL_ENCODE_DEVICE((x)) : \ - (_XTHAL_IS_SYSTEM_NONCACHEABLE((x)) ? \ - _XTHAL_ENCODE_SYSTEM_NONCACHEABLE((x)) : \ - _XTHAL_ENCODE_SYSTEM_CACHEABLE((x)))) : (x)) - -/* - * This structure is used to represent each MPU entry (both foreground and - * background). The internal representation of the structure is subject to - * change, so it should only be accessed by the XTHAL_MPU_ENTRY_... macros - * below. 
- */ -typedef struct xthal_MPU_entry -{ - unsigned as; /* virtual start address, and valid bit */ - unsigned at; /* access rights, and memory type (and space for entry index) */ -} xthal_MPU_entry; - -extern const xthal_MPU_entry Xthal_mpu_bgmap[]; - - - - -/* - * XTHAL_MPU_ENTRY creates an MPU entry from its component values. It is - * intended for initializing an MPU map. Example: - * const struct xthal_MPU_entry mpumap[] = - { XTHAL_MPU_ENTRY( 0x00000000, 1, XTHAL_AR_RWXrwx, XTHAL_MEM_WRITEBACK), - XTHAL_MPU_ENTRY( 0xE0000000, 1, XTHAL_AR_RWXrwx, - XTHAL_MEM_NON_CACHEABLE | XTHAL_MEM_BUFFERABLE), - XTHAL_MPU_ENTRY( 0xF0000000, 1, XTHAL_AR_RWX, - XTHAL_MEM_NON_CACHEABLE | XTHAL_MEM_BUFFERABLE) }; - xthal_write_map(mpumap, sizeof(mpumap) / sizeof(struct xthal_MPU_entry)); - * - */ -#define XTHAL_MPU_ENTRY(vaddr, valid, access, memtype) \ - { (((vaddr) & 0xffffffe0) | ((valid & 0x1))), \ - (((XTHAL_ENCODE_MEMORY_TYPE(memtype)) << 12) | (((access) & 0xf) << 8)) } - -/* - * These macros get (or set) the specified field of the MPU entry. - */ -#define XTHAL_MPU_ENTRY_GET_VSTARTADDR(x) ((x).as & 0xffffffe0) - -#define XTHAL_MPU_ENTRY_SET_VSTARTADDR(x, vaddr) (x).as = \ - (((x).as) & 0x1) | ((vaddr) & 0xffffffe0) - -#define XTHAL_MPU_ENTRY_GET_VALID(x) (((x).as & 0x1)) - -#define XTHAL_MPU_ENTRY_SET_VALID(x, valid) (x).as = \ - (((x).as & 0xfffffffe) | ((valid) & 0x1)) -#define XTHAL_MPU_ENTRY_GET_ACCESS(x) ((((x).at) >> 8) & 0xf) - -#define XTHAL_MPU_ENTRY_SET_ACCESS(x, accessRights) ((x).at = \ - ((x).at & 0xfffff0ff) | (((accessRights) & 0xf) << 8)) - -#define XTHAL_MPU_ENTRY_GET_MEMORY_TYPE(x) ((((x).at) >> 12) & 0x1ff) - -#define XTHAL_MPU_ENTRY_SET_MEMORY_TYPE(x, memtype) ((x).at = \ - ((x).at & 0xffe00fff) | (((XTHAL_ENCODE_MEMORY_TYPE(memtype)) & 0x1ff) << 12)) - -/* - * These functions accept encoded access rights, and return 1 if the - * supplied memory type has the property specified by the function name, - * otherwise they return 0. - */ -extern int xthal_is_kernel_readable(int accessRights); -extern int xthal_is_kernel_writeable(int accessRights); -extern int xthal_is_kernel_executable(int accessRights); -extern int xthal_is_user_readable(int accessRights); -extern int xthal_is_user_writeable (int accessRights); -extern int xthal_is_user_executable(int accessRights); - - -/* - * This function converts a bit-wise combination of the XTHAL_MEM_.. constants - * to the corresponding MPU memory type (9-bits). - * - * If none of the XTHAL_MEM_.. bits are present in the argument, then - * bits 4-12 (9-bits) are returned ... this supports using an already encoded - * memoryType (perhaps obtained from an xthal_MPU_entry structure) as input - * to xthal_set_region_attribute(). - * - * This function first checks that the supplied constants are a valid and - * supported combination. If not, it returns XTHAL_BAD_MEMORY_TYPE. - */ -extern int xthal_encode_memory_type(unsigned int x); - -/* - * This function accepts a 9-bit memory type value (such as returned by - * XTHAL_MEM_ENTRY_GET_MEMORY_TYPE() or xthal_encode_memory_type(). They - * return 1 if the memoryType has the property specified in the function - * name and 0 otherwise. - */ -extern int xthal_is_cacheable(unsigned int memoryType); -extern int xthal_is_writeback(unsigned int memoryType); -extern int xthal_is_device(unsigned int memoryType); - -/* - * Copies the current MPU entry list into 'entries' which - * must point to available memory of at least - * sizeof(struct xthal_MPU_entry) * XCHAL_MPU_ENTRIES. 
- * - * This function returns XTHAL_SUCCESS. - * XTHAL_INVALID, or - * XTHAL_UNSUPPORTED. - */ -extern int xthal_read_map(struct xthal_MPU_entry* entries); - -/* - * Writes the map pointed to by 'entries' to the MPU. Before updating - * the map, it commits any uncommitted - * cache writes, and invalidates the cache if necessary. - * - * This function does not check for the correctness of the map. Generally - * xthal_check_map() should be called first to check the map. - * - * If n == 0 then the existing map is cleared, and no new map is written - * (useful for returning to reset state) - * - * If (n > 0 && n < XCHAL_MPU_ENTRIES) then a new map is written with - * (XCHAL_MPU_ENTRIES-n) padding entries added to ensure a properly ordered - * map. The resulting foreground map will be equivalent to the map vector - * fg, but the position of the padding entries should not be relied upon. - * - * If n == XCHAL_MPU_ENTRIES then the complete map as specified by fg is - * written. - * - * The CACHEADRDIS register will be set to enable caching any 512MB region - * that is overlapped by an MPU region with a cacheable memory type. - * Caching will be disabled if none of the 512 MB region is cacheable. - * - * xthal_write_map() disables the MPU foreground map during the MPU - * update and relies on the background map. - * - * As a result any interrupt that does not meet the following conditions - * must be disabled before calling xthal_write_map(): - * 1) All code and data needed for the interrupt must be - * mapped by the background map with sufficient access rights. - * 2) The interrupt code must not access the MPU. - * - */ -extern void xthal_write_map(const struct xthal_MPU_entry* entries, unsigned n); - -/* - * Checks if entry vector 'entries' of length 'n' is a valid MPU access map. - * Returns: - * XTHAL_SUCCESS if valid, - * XTHAL_OUT_OF_ENTRIES - * XTHAL_MAP_NOT_ALIGNED, - * XTHAL_BAD_ACCESS_RIGHTS, - * XTHAL_OUT_OF_ORDER_MAP, or - * XTHAL_UNSUPPORTED if config doesn't have an MPU. - */ -extern int xthal_check_map(const struct xthal_MPU_entry* entries, unsigned n); - -/* - * Returns the MPU entry that maps 'vaddr'. If 'infgmap' is non-NULL then - * *infgmap is set to 1 if 'vaddr' is mapped by the foreground map, and - * *infgmap is set to 0 if 'vaddr' is mapped by the background map. - */ -extern struct xthal_MPU_entry xthal_get_entry_for_address(void* vaddr, - int* infgmap); - -/* - * Scans the supplied MPU map and returns a value suitable for writing to - * the CACHEADRDIS register: - * Bits 0-7 -> 1 if there are no cacheable areas in the corresponding 512MB - * region and 0 otherwise. - * Bits 8-31 -> undefined. - * This function can accept a partial memory map in the same manner - * xthal_write_map() does, */ -extern unsigned int -xthal_calc_cacheadrdis(const struct xthal_MPU_entry* e, unsigned n); - -/* - * This function is intended as an MPU specific version of - * xthal_set_region_attributes(). xthal_set_region_attributes() calls - * this function for MPU configurations. - * - * This function sets the attributes for the region [vaddr, vaddr+size) - * in the MPU. - * - * Depending on the state of the MPU this function will require from - * 0 to 3 unused MPU entries. - * - * This function typically will move, add, and subtract entries from - * the MPU map during execution, so that the resulting map may - * be quite different than when the function was called. 
- * - * This function does make the following guarantees: - * 1) The MPU access map remains in a valid state at all times - * during its execution. - * 2) At all points during (and after) completion the memoryType - * and accessRights remain the same for all addresses - * that are not in the range [vaddr, vaddr+size). - * 3) If XTHAL_SUCCESS is returned, then the range - * [vaddr, vaddr+size) will have the accessRights and memoryType - * specified. - * 4) The CACHEADRDIS register will be set to enable caching any 512MB region - * that is overlapped by an MPU region with a cacheable memory type. - * Caching will be disabled if none of the 512 MB region is cacheable. - * - * The accessRights parameter should be either a 4-bit value corresponding - * to an MPU access mode (as defined by the XTHAL_AR_.. constants), or - * XTHAL_MPU_USE_EXISTING_ACCESS_RIGHTS. - * - * The memoryType parameter should be either a bit-wise or-ing of XTHAL_MEM_.. - * constants that represent a valid MPU memoryType, a 9-bit MPU memoryType - * value, or XTHAL_MPU_USE_EXISTING_MEMORY_TYPE. - * - * In addition to the error codes that xthal_set_region_attribute() - * returns, this function can also return: XTHAL_BAD_ACCESS_RIGHTS - * (if the access rights bits map to an unsupported combination), or - * XTHAL_OUT_OF_MAP_ENTRIES (if there are not enough unused MPU entries) - * - * If this function is called with an invalid MPU map, then this function - * will return one of the codes that is returned by xthal_check_map(). - * - * The flag, XTHAL_CAFLAG_EXPAND, is not supported - * - */ - -extern int xthal_mpu_set_region_attribute(void* vaddr, unsigned size, - int accessRights, int memoryType, unsigned flags); - -/* The following are internal implementation macros. These should not - * be directly used except by the hal code and headers. -*/ - -/* - * Layout of the MPU specifier for: XTHAL_ENCODE_MEMORY_TYPE(), - * xthal_encode_memory_type(), xthal_set_region_attribute(), - * and xthal_mpu_set_region_attribute(). THIS IS SUBJECT TO CHANGE: - * - * Bits 0-3 - reserved for pass through of accessRights - * Bits 4-12 - reserved for pass through of memoryType bits - * Bit 13 - indicates to use existing access rights of region - * Bit 14 - indicates to use existing memory type of region - * Bit 15 - indicates device - * Bit 16-19- system cache properties - * Bit 20-23- local cache properties - * Bit 24 - indicates bufferable - * Bit 25-26- encodes shareability (1=inner, 2=outer, 3=system) - * Bit 27 - indicates interruptible - * Bits 28-31- reserved for future use - */ -#define _XTHAL_SYSTEM_CACHE_BITS 0x000f0000 -#define _XTHAL_LOCAL_CACHE_BITS 0x00f00000 -#define _XTHAL_MEM_SYSTEM_RWC_MASK 0x00070000 -#define _XTHAL_MEM_LOCAL_RWC_MASK 0x00700000 -#define _XTHAL_SHIFT_RWC 16 - -#define _XTHAL_MEM_ANY_SHAREABLE(x) (((x) & XTHAL_MEM_SYSTEM_SHAREABLE) ? 1 : 0) - -#define _XTHAL_MEM_INNER_SHAREABLE(x) ((((x) & XTHAL_MEM_SYSTEM_SHAREABLE) \ - == XTHAL_MEM_INNER_SHAREABLE) ? 1 : 0) - -#define _XTHAL_MEM_IS_BUFFERABLE(x) (((x) & XTHAL_MEM_BUFFERABLE) ? 1 : 0) - -#define _XTHAL_MEM_IS_DEVICE(x) (((x) & XTHAL_MEM_DEVICE) ? 1 : 0) - -#define _XTHAL_NON_CACHEABLE_DOMAIN(x) \ - (_XTHAL_MEM_IS_DEVICE(x) || _XTHAL_MEM_ANY_SHAREABLE(x)? 0x3 : 0) - -#define _XTHAL_CACHEABLE_DOMAIN(x) (_XTHAL_MEM_ANY_SHAREABLE(x) ? \ - 0x3 : 0x1) - -#define _XTHAL_MEM_CACHE_MASK(x) ((x) & _XTHAL_SYSTEM_CACHE_BITS) - -#define _XTHAL_IS_SYSTEM_NONCACHEABLE(x) \ - (((_XTHAL_MEM_CACHE_MASK(x) & XTHAL_MEM_NON_CACHEABLE) == \ - XTHAL_MEM_NON_CACHEABLE) ? 
1 : 0) - -#define _XTHAL_ENCODE_DEVICE(x) \ - (((((x) & XTHAL_MEM_INTERRUPTIBLE) ? 1 : 0) << 3) | \ - (_XTHAL_NON_CACHEABLE_DOMAIN(x) << 1) | _XTHAL_MEM_IS_BUFFERABLE(x)) - -#define _XTHAL_ENCODE_SYSTEM_NONCACHEABLE(x) \ - (0x18 | (_XTHAL_NON_CACHEABLE_DOMAIN(x) << 1) \ - | _XTHAL_MEM_IS_BUFFERABLE(x)) - -#define _XTHAL_ENCODE_SYSTEM_CACHEABLE(x) \ - (((((((x) & _XTHAL_LOCAL_CACHE_BITS) >> 4) & XTHAL_MEM_NON_CACHEABLE) == \ - XTHAL_MEM_NON_CACHEABLE) ? 1 : 0) ? \ - (_XTHAL_CACHEABLE_DOMAIN(x) << 4) : \ - _XTHAL_ENCODE_SYSTEM_CACHEABLE_LOCAL_CACHEABLE(x)) | \ - ((_XTHAL_MEM_INNER_SHAREABLE(x) << 3) | \ - (_XTHAL_MEM_CACHE_MASK(x) & _XTHAL_MEM_SYSTEM_RWC_MASK) \ - >> _XTHAL_SHIFT_RWC) - -#define _XTHAL_ENCODE_SYSTEM_CACHEABLE_LOCAL_CACHEABLE(x) \ - ((_XTHAL_CACHEABLE_DOMAIN(x) << 7) | (((((x) & _XTHAL_LOCAL_CACHE_BITS) ? \ - ((x) & _XTHAL_LOCAL_CACHE_BITS) : \ - (_XTHAL_MEM_CACHE_MASK(x) << 4)) \ - & (_XTHAL_MEM_LOCAL_RWC_MASK)) >> _XTHAL_SHIFT_RWC )) - -/* End of internal macros */ - -/* The functions and constants below here have been deprecated.*/ -#define XTHAL_MEM_NON_CACHED XTHAL_MEM_NON_CACHEABLE -#define XTHAL_MEM_NON_SHARED XTHAL_MEM_NON_SHAREABLE -#define XTHAL_MEM_INNER_SHARED XTHAL_MEM_INNER_SHAREABLE -#define XTHAL_MEM_OUTER_SHARED XTHAL_MEM_OUTER_SHAREABLE -#define XTHAL_MEM_SYSTEM_SHARED XTHAL_MEM_SYSTEM_SHAREABLE -#define XTHAL_MEM_SW_SHAREABLE 0 - -#define xthal_is_cached(memoryType) (xthal_is_cacheable((memoryType))) -extern int xthal_read_background_map(struct xthal_MPU_entry* entries); - -/* end deprecated functions and constants */ - -#ifdef __cplusplus -} -#endif -#endif /*!_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */ - -#endif /* !XTENSA_HAL_NON_PRIVILEGED_ONLY */ - - - - -/**************************************************************************** - EXPERIMENTAL and DEPRECATED Definitions - ****************************************************************************/ - - -#if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__) -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef INCLUDE_DEPRECATED_HAL_CODE -extern const unsigned char Xthal_have_old_exc_arch; -extern const unsigned char Xthal_have_mmu; -extern const unsigned int Xthal_num_regs; -extern const unsigned char Xthal_num_iroms; -extern const unsigned char Xthal_num_irams; -extern const unsigned char Xthal_num_droms; -extern const unsigned char Xthal_num_drams; -extern const unsigned int Xthal_configid0; -extern const unsigned int Xthal_configid1; -#endif - -#ifdef INCLUDE_DEPRECATED_HAL_DEBUG_CODE -#define XTHAL_24_BIT_BREAK 0x80000000 -#define XTHAL_16_BIT_BREAK 0x40000000 -extern const unsigned short Xthal_ill_inst_16[16]; -#define XTHAL_DEST_REG 0xf0000000 /* Mask for destination register */ -#define XTHAL_DEST_REG_INST 0x08000000 /* Branch address is in register */ -#define XTHAL_DEST_REL_INST 0x04000000 /* Branch address is relative */ -#define XTHAL_RFW_INST 0x00000800 -#define XTHAL_RFUE_INST 0x00000400 -#define XTHAL_RFI_INST 0x00000200 -#define XTHAL_RFE_INST 0x00000100 -#define XTHAL_RET_INST 0x00000080 -#define XTHAL_BREAK_INST 0x00000040 -#define XTHAL_SYSCALL_INST 0x00000020 -#define XTHAL_LOOP_END 0x00000010 /* Not set by xthal_inst_type */ -#define XTHAL_JUMP_INST 0x00000008 /* Call or jump instruction */ -#define XTHAL_BRANCH_INST 0x00000004 /* Branch instruction */ -#define XTHAL_24_BIT_INST 0x00000002 -#define XTHAL_16_BIT_INST 0x00000001 -typedef struct xthal_state { - unsigned pc; - unsigned ar[16]; - unsigned lbeg; - unsigned lend; - unsigned lcount; - 
unsigned extra_ptr; - unsigned cpregs_ptr[XTHAL_MAX_CPS]; -} XTHAL_STATE; -extern unsigned int xthal_inst_type(void *addr); -extern unsigned int xthal_branch_addr(void *addr); -extern unsigned int xthal_get_npc(XTHAL_STATE *user_state); -#endif /* INCLUDE_DEPRECATED_HAL_DEBUG_CODE */ - -#ifdef __cplusplus -} -#endif -#endif /*!_ASMLANGUAGE && !_NOCLANGUAGE && !__ASSEMBLER__ */ - -#endif /*XTENSA_HAL_H*/ - diff --git a/src/arch/xtensa/include/xtensa/idmaasm.h b/src/arch/xtensa/include/xtensa/idmaasm.h deleted file mode 100755 index 2669ff064ffd..000000000000 --- a/src/arch/xtensa/include/xtensa/idmaasm.h +++ /dev/null @@ -1,72 +0,0 @@ -/* $Id: //depot/dev/Foxhill/Xtensa/OS/include/xtensa/mpuasm.h#5 $ */ - -/* - * Copyright (c) 2016 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _IDMAASM_H_ -#define _IDMAASM_H_ - -#if XCHAL_HAVE_IDMA -#include -#endif - -/* - * Macro for restore IDMA regs - * - * Parameters: - * a_save => address register containing pointer to IDMA save area - * a_temp1, a_temp2, a_temp3. => address register temporaries - */ -// IDMA_REG_SETTINGS, -// IDMA_REG_TIMEOUT, -// IDMA_REG_DESC_START, -// IDMA_REG_CONTROL, -// IDMA_REG_USERPRIV, - -.macro _idma_restore a_save, a_temp1, a_temp2, a_temp3 -#if XCHAL_HAVE_IDMA - l32i \a_temp1, \a_save, 0 - movi \a_temp3, idmareg_base - movi \a_temp2, IDMA_REG_SETTINGS - add \a_temp2, \a_temp2, \a_temp3 - wer \a_temp1, \a_temp2 - l32i \a_temp1, \a_save, 4 - movi \a_temp2, IDMA_REG_TIMEOUT - add \a_temp2, \a_temp2, \a_temp3 - wer \a_temp1, \a_temp2 - l32i \a_temp1, \a_save, 8 - movi \a_temp2, IDMA_REG_DESC_START - add \a_temp2, \a_temp2, \a_temp3 - wer \a_temp1, \a_temp2 - l32i \a_temp1, \a_save, 12 - movi \a_temp2, IDMA_REG_CONTROL - add \a_temp2, \a_temp2, \a_temp3 - wer \a_temp1, \a_temp2 - l32i \a_temp1, \a_save, 16 - movi \a_temp2, IDMA_REG_USERPRIV - add \a_temp2, \a_temp2, \a_temp3 - wer \a_temp1, \a_temp2 -#endif -.endm - -#endif //_IDMAASM_H_ diff --git a/src/arch/xtensa/include/xtensa/mpuasm.h b/src/arch/xtensa/include/xtensa/mpuasm.h deleted file mode 100644 index f14dacc0b044..000000000000 --- a/src/arch/xtensa/include/xtensa/mpuasm.h +++ /dev/null @@ -1,111 +0,0 @@ -/* $Id: //depot/dev/Foxhill/Xtensa/OS/include/xtensa/mpuasm.h#5 $ */ - -/* - * Copyright (c) 2016 Cadence Design Systems, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _MPUASM_H_ -#define _MPUASM_H_ -#include - -/* - * Macro for writing MPU map. - * - * Parameters: - * a_map => address register containing pointer to MPU map - * a_num_entries => number of entries in the forementioned map - * a_temp1, a_temp2. => address register temporaries - * a_temp3, a_temp4 - */ - -.macro mpu_write_map a_map, a_num_entries, a_temp1, a_temp2, a_temp3, a_temp4 -#if XCHAL_HAVE_MPU - movi \a_temp1, 0 - wsr.cacheadrdis \a_temp1 // enable the cache in all regions - wsr.mpuenb \a_temp1 // disable all foreground entries - - // Clear out the unused entries. - // - // Currently we are clearing out all the entries because currently - // the entries must be ordered even if they are all disabled. - // If out of order entries were permitted when all are disabled, - // performance could be improved by clearing XCHAL_MPU_ENTRIES - n - // (n = number of entries) rather than XCHAL_MPU_ENTRIES - 1 entries. - // - movi \a_temp2, 0 - movi \a_temp3, XCHAL_MPU_ENTRIES - 1 - j 1f - .align 16 // this alignment is done to ensure that -1: - memw // todo currently wptlb must be preceeded by a memw. The instructions must - // be aligned to ensure that both are in the same cache line. These statements should be - // properly conditionalized when that restriction is removed from the HW - wptlb \a_temp2, \a_temp1 - addi \a_temp2, \a_temp2, 1 - bltu \a_temp2, \a_temp3, 1b - - // Write the new entries. - // - beqz \a_num_entries, 4f // if no entries, skip loop - addx8 \a_map, \a_num_entries, \a_map // compute end of provided map - j 3f - .align 16 -2: memw // todo currently wptlb must be preceeded by a memw. The instructions must - // be aligned to ensure that both are in the same cache line. 
These statements should be - // properly conditionalized when that restriction is removed from the HW - wptlb \a_temp2, \a_temp4 - addi \a_temp3, \a_temp3, -1 - beqz \a_num_entries, 4f // loop until done -3: addi \a_map, \a_map, -8 - l32i \a_temp2, \a_map, 4 // get at (acc.rights, memtype) - l32i \a_temp4, \a_map, 0 // get as (vstart, valid) - addi \a_num_entries, \a_num_entries, -1 - extui \a_temp1, \a_temp2, 0, 5 // entry index portion - xor \a_temp2, \a_temp2, \a_temp1 // zero it - or \a_temp2, \a_temp2, \a_temp3 // set index = \a_temp3 - j 2b -4: -#endif -.endm - -/* - * Macro for reading MPU map - * - * Parameters: - * a_map_ptr => address register pointing to memory where map is written - * a_temp1, a_temp2 => address register temporaries - */ -.macro mpu_read_map a_map_ptr, a_temp1, a_temp2 -#if XCHAL_HAVE_MPU - movi \a_temp1, XCHAL_MPU_ENTRIES // set index to last entry + 1 - addx8 \a_map_ptr, \a_temp1, \a_map_ptr // set map ptr to last entry + 1 -1: addi \a_temp1, \a_temp1, -1 // decrement index - addi \a_map_ptr, \a_map_ptr, -8 // decrement index - rptlb0 \a_temp2, \a_temp1 // read 1/2 of entry - s32i \a_temp2, \a_map_ptr, 0 // write 1/2 of entry - rptlb1 \a_temp2, \a_temp1 - s32i \a_temp2, \a_map_ptr, 4 - bnez \a_temp1, 1b // loop until done -#endif - .endm - -#endif diff --git a/src/arch/xtensa/include/xtensa/overlay.h b/src/arch/xtensa/include/xtensa/overlay.h deleted file mode 100644 index 9e41cd2ebc2d..000000000000 --- a/src/arch/xtensa/include/xtensa/overlay.h +++ /dev/null @@ -1,190 +0,0 @@ -// overlay.h -- Overlay manager header file -// $Id$ - -// Copyright (c) 2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#ifndef OVERLAY_H -#define OVERLAY_H - - -#include - - -#ifdef __cplusplus -extern "C" { -#endif - - -// Overlays not supported for CALL0 ABI -#if defined (__XTENSA_CALL0_ABI__) -#undef XT_DISABLE_OVERLAYS -#define XT_DISABLE_OVERLAYS 1 -#endif - -// Define this to turn off overlay support -#ifdef XT_DISABLE_OVERLAYS - -#define OVERLAY(n) -#define DECLARE_OVERLAY(n) - -#define xt_overlay_map(ov_id) -#define xt_overlay_map_async(ov_id) 0 -#define xt_overlay_map_in_progress() 0 -#define xt_overlay_get_id() 0 -#define xt_overlay_get_state(pc) 0 -#define xt_overlay_check_map(pc,ps,ovstate,sp) 0 - -#else - -// Shorthand for convenience and portability. -#define OVERLAY(n) __attribute__((overlay(n))) - -// Structure of the overlay table required by gdb and the overlay -// manager. 
Should not be accessed by user code unless overriding -// the load process. -struct ovly_table { - void * vma; // The overlay's mapped address. - unsigned int size; // The size of the overlay, in bytes. - void * lma; // The overlay's load address. - unsigned int mapped; // Non-zero if overlay is currently mapped; zero otherwise. -}; - -// Constructed by the linker. Required for gdb and for the overlay -// manager. Should not be accessed by user code unless overriding -// the load process. -extern struct ovly_table _ovly_table[]; - -// Functions. -void xt_overlay_map(int ov_id); -int xt_overlay_map_async(int ov_id); -int xt_overlay_map_in_progress(void); -unsigned int xt_overlay_get_state(unsigned int pc); -unsigned int xt_overlay_check_map(unsigned int * pc, unsigned int * ps, - unsigned int ovstate, unsigned int sp); -int xt_overlay_start_map(void * dst, void * src, unsigned int len, int ov_id); -int xt_overlay_is_mapping(int ov_id); -void xt_overlay_fatal_error(int ov_id); - - -// Returns the current overlay ID. If no overlay is mapped or an overlay -// is in the middle of being mapped, returns -1. Inlined to avoid calling -// out of overlay (wastes cycles, can end up reading wrong ID on interrupt -// activity). -// -static inline int __attribute__((always_inline)) xt_overlay_get_id(void) -{ -extern short _mapping_id; -extern short _ovly_id; - - int ret; - unsigned int flags = XTOS_SET_INTLEVEL(15); - - if (_mapping_id >= 0) { - ret = -1; - } - else { - ret = _ovly_id; - } - - XTOS_RESTORE_INTLEVEL(flags); - return ret; -} - - -// The following macros are used to declare numbered overlays and generate -// the corresponding call stubs. Use as follows: -// -// DECLARE_OVERLAY(n) -// -// See documentation for more details. - -//#include - -// At this time overlays are not supported without windowing. -#if defined(__XTENSA_WINDOWED_ABI__) - -#define xstr(x) str(x) -#define str(x) #x - -// At entry, register a8 holds the return address and a9 holds the target -// function address. This stub saves a8 on the stack at (SP - 20) which -// is the only location that is safe for us to use. Then it allocates 32 -// bytes on the stack for working storage, loads the overlay number into -// a8, and jumps to the common handler. The common handler will make sure -// that the called function is loaded into memory before calling it. -// NOTE: we are using the stack area normally reserved for nested functions. -// This means nested functions cannot be used when overlays are in use. - -#define CALL_IN(num) \ - asm(".section .gnu.linkonce.t.overlay.call." xstr(num) ".text, \"ax\"\n" \ - ".global _overlay_call_in_" xstr(num) "_\n" \ - ".align 4\n" \ - "_overlay_call_in_" xstr(num) "_:\n" \ - "s32e a8, a1, -20\n" \ - "addi a8, a1, -32\n" \ - "movsp a1, a8\n" \ - "movi a8, " xstr(num) "\n" \ - "j _overlay_call_in_common\n" \ - ".size _overlay_call_in_" xstr(num) "_, . - _overlay_call_in_" xstr(num) "_\n"); - -// The call-out stub first calls the target function, then loads the overlay -// number into register a14 and jumps to the common handler. The handler will -// make sure that the caller function is present in memory before returning. -// Note that registers a10-a13 may contain return values so must be preserved. -// -// Because we came here via a call4, the return address is in a4, and the top -// 2 bits are set to the window increment. We'll restore the top 2 bits of -// the return address from the called function's address, assuming that both -// are in the same 1 GB segment. For now this is always true. 
- -#define CALL_OUT(num) \ - asm(".section .gnu.linkonce.t.overlay.call." xstr(num) ".text, \"ax\"\n" \ - ".global _overlay_call_out_" xstr(num) "_\n" \ - ".align 4\n" \ - "_overlay_call_out_" xstr(num) "_:\n" \ - "slli a4, a4, 2\n" \ - "srli a4, a4, 2\n" \ - "extui a8, a9, 30, 2\n" \ - "slli a8, a8, 30\n" \ - "or a4, a4, a8\n" \ - "callx8 a9\n" \ - "movi a14, " xstr(num) "\n" \ - "j _overlay_call_out_common\n" \ - ".size _overlay_call_out_" xstr(num) "_, . - _overlay_call_out_" xstr(num) "_\n"); - -// Generate a call-in and a call-out stub for each overlay. - -#define DECLARE_OVERLAY(num) \ - CALL_IN(num) \ - CALL_OUT(num) - -#endif // defined(__XTENSA_WINDOWED_ABI__) - -#endif // XT_DISABLE_OVERLAYS - -#ifdef __cplusplus -} -#endif - -#endif // OVERLAY_H - diff --git a/src/arch/xtensa/include/xtensa/overlay_os_asm.h b/src/arch/xtensa/include/xtensa/overlay_os_asm.h deleted file mode 100644 index 4adc044e6a6c..000000000000 --- a/src/arch/xtensa/include/xtensa/overlay_os_asm.h +++ /dev/null @@ -1,140 +0,0 @@ -// overlay_os_asm.h -- Overlay manager assembly macros for OS use. -// $Id$ - -// Copyright (c) 2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#ifndef OVERLAY_OS_ASM_H -#define OVERLAY_OS_ASM_H - -// The macros in here are intended to be used by RTOS task switch code -// to check overlay status. Such code is usually in assembly and cannot -// call C code without penalty. For C code usage, it is best to use the -// corresponding C functions from the library. - - -// Inline assembly version of xt_overlay_get_state(). The arguments are -// three AR registers (a0-a15): -// -// "pcreg" - should contain the outgoing task's PC, i.e. the point at -// which the task got interrupted. The return value is also -// returned in this register. -// "sr1/2" - Scratch registers. These must be distinct from "pcreg". -// -// The return value is a 32-bit result that should be saved with the -// task context and passed as-is to xt_overlay_check_map. - - .macro _xt_overlay_get_state pcreg sr1 sr2 - - movi \sr1, _mapping_id - movi \sr2, _ovly_id - l16si \sr1, \sr1, 0 - l16ui \sr2, \sr2, 0 - slli \sr1, \sr1, 16 - or \pcreg, \sr1, \sr2 - - .endm - - -// Inline assembly version of xt_overlay_check_map(). It requires 5 AR -// registers (a0-a15) as arguments. -// -// "pcreg" - should contain the interrupted task's PC, i.e. the point -// at which the task got interrupted. This will be adjusted -// if required. 
-// "psreg" - should contain the interrupted task's PS. This will be -// adjusted if required. -// "ovreg" - should contain the overlay state on entry. Contents may -// be clobbered. -// "spreg" - should contain the tasks stack pointer on entry. -// "sr1" - Scratch register. Must be distinct from any of the above. -// -// The return values are "pcreg" and "psreg" and these must be used -// to update the task's PC and PS. -// Note that this macro may store data below the "spreg" pointer. If -// it does, then it will also disable interrupts via the PS, so that -// the task resumes with all interrupts disabled (to avoid corrupting -// this data). -// -// (SP - 24) Overlay ID to restore -// (SP - 28) Task PC -// (SP - 32) Task PS - - .macro _xt_overlay_check_map pcreg psreg ovreg spreg sr1 - -// There are four cases to deal with: -// -// _ovly_id = -1, _mapping_id = -1 -// No overlay is mapped or mapping, nothing to do. -// -// _ovly_id >= 0, _mapping_id = -1 -// An overlay was mapped, check PC to see if we need a restore. -// -// _ovly_id = -1, _mapping_id >= 0 -// An overlay is being mapped. Either it belongs to this task, which -// implies that the PC is in the mapping function, or it does not -// belong to this task. Either way there is nothing to do. -// -// _ovly_id >= 0, _mapping_id >= 0 -// Illegal, cannot happen by design. Don't need to handle this. -// -// So, the logic is to check _ovly_id first. If this is >= 0, then -// we check the task PC. If the PC is in the regions of interest then -// we'll patch the return PC to invoke xt_overlay_restore. - -.L1: - extui \sr1, \ovreg, 0, 16 // Extract _ovly_id - bbsi.l \sr1, 15, .Lno // If -1 then we're done - mov \ovreg, \sr1 // Restore this one - -// Next check the PC to see if it falls within the ranges of interest. - -.L2: - movi \sr1, _overlay_vma // Is PC < VMA range ? - bltu \pcreg, \sr1, .L3 - movi \sr1, _overlay_vma_end // Is PC > VMA range ? - bgeu \pcreg, \sr1, .L3 - j .L4 // PC is in VMA range -.L3: - movi \sr1, _overlay_call_stubs_start // Is PC < call stubs range ? - bltu \pcreg, \sr1, .Lno - movi \sr1, _overlay_call_stubs_end // Is PC > call stubs range ? - bgeu \pcreg, \sr1, .Lno - -// If we get here then a restore is needed. Save the overlay ID, PC and PS. -// Return modified PC and PS so that xt_overlay_restore() will execute in -// the context of the task when resumed. Note that the OS resumption code -// may expect PS.EXCM to be set so we leave it as is in the return value. - -.L4: - s32e \ovreg, \spreg, -24 // Save overlay ID - s32e \pcreg, \spreg, -28 // Save task PC - s32e \psreg, \spreg, -32 // Save task PS - movi \pcreg, xt_overlay_restore // Adjust resumption PC - movi \sr1, 15 - or \psreg, \psreg, \sr1 // Set intlevel to highest -.Lno: - - .endm - -#endif // OVERLAY_OS_ASM_H - diff --git a/src/arch/xtensa/include/xtensa/simboard.h b/src/arch/xtensa/include/xtensa/simboard.h deleted file mode 100644 index 980b0b759635..000000000000 --- a/src/arch/xtensa/include/xtensa/simboard.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2001 Tensilica Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* simboard.h - Xtensa ISS "Board" specific definitions */ - -#ifndef _INC_SIMBOARD_H_ -#define _INC_SIMBOARD_H_ - -#include -#include - - -/* - * Device addresses. - */ - -/* System ROM: */ -#define XTBOARD_ROM_SIZE XSHAL_ROM_SIZE -#ifdef XSHAL_ROM_VADDR -#define XTBOARD_ROM_VADDR XSHAL_ROM_VADDR -#endif -#ifdef XSHAL_ROM_PADDR -#define XTBOARD_ROM_PADDR XSHAL_ROM_PADDR -#endif - -/* System RAM: */ -#define XTBOARD_RAM_SIZE XSHAL_RAM_SIZE -#ifdef XSHAL_RAM_VADDR -#define XTBOARD_RAM_VADDR XSHAL_RAM_VADDR -#endif -#ifdef XSHAL_RAM_PADDR -#define XTBOARD_RAM_PADDR XSHAL_RAM_PADDR -#endif - - -/* - * Things that depend on device addresses. - */ - -#define XTBOARD_CACHEATTR_WRITEBACK XSHAL_ISS_CACHEATTR_WRITEBACK -#define XTBOARD_CACHEATTR_WRITEALLOC XSHAL_ISS_CACHEATTR_WRITEALLOC -#define XTBOARD_CACHEATTR_WRITETHRU XSHAL_ISS_CACHEATTR_WRITETHRU -#define XTBOARD_CACHEATTR_BYPASS XSHAL_ISS_CACHEATTR_BYPASS -#define XTBOARD_CACHEATTR_DEFAULT XSHAL_ISS_CACHEATTR_DEFAULT - -#define XTBOARD_BUSINT_PIPE_REGIONS 0 -#define XTBOARD_BUSINT_SDRAM_REGIONS 0 - - -#endif /*_INC_SIMBOARD_H_*/ - diff --git a/src/arch/xtensa/include/xtensa/specreg.h b/src/arch/xtensa/include/xtensa/specreg.h deleted file mode 100644 index 1805811c28eb..000000000000 --- a/src/arch/xtensa/include/xtensa/specreg.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Xtensa Special Register symbolic names - */ - -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/include/xtensa/specreg.h#1 $ */ - -/* - * Copyright (c) 2005-2011 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTENSA_SPECREG_H -#define XTENSA_SPECREG_H - -/* Special registers: */ -#define LBEG 0 -#define LEND 1 -#define LCOUNT 2 -#define SAR 3 -#define BR 4 -#define LITBASE 5 -#define SCOMPARE1 12 -#define ACCLO 16 -#define ACCHI 17 -#define MR_0 32 -#define MR_1 33 -#define MR_2 34 -#define MR_3 35 -#define PREFCTL 40 -#define WINDOWBASE 72 -#define WINDOWSTART 73 -#define PTEVADDR 83 -#define RASID 90 -#define ITLBCFG 91 -#define DTLBCFG 92 -#define IBREAKENABLE 96 -#define MEMCTL 97 -#define CACHEATTR 98 /* until T1050, XEA1 */ -#define CACHEADRDIS 98 /* LX7+ */ -#define ATOMCTL 99 -#define DDR 104 -#define MECR 110 -#define IBREAKA_0 128 -#define IBREAKA_1 129 -#define DBREAKA_0 144 -#define DBREAKA_1 145 -#define DBREAKC_0 160 -#define DBREAKC_1 161 -#define CONFIGID0 176 -#define EPC_1 177 -#define EPC_2 178 -#define EPC_3 179 -#define EPC_4 180 -#define EPC_5 181 -#define EPC_6 182 -#define EPC_7 183 -#define DEPC 192 -#define EPS_2 194 -#define EPS_3 195 -#define EPS_4 196 -#define EPS_5 197 -#define EPS_6 198 -#define EPS_7 199 -#define CONFIGID1 208 -#define EXCSAVE_1 209 -#define EXCSAVE_2 210 -#define EXCSAVE_3 211 -#define EXCSAVE_4 212 -#define EXCSAVE_5 213 -#define EXCSAVE_6 214 -#define EXCSAVE_7 215 -#define CPENABLE 224 -#define INTERRUPT 226 -#define INTREAD INTERRUPT /* alternate name for backward compatibility */ -#define INTSET INTERRUPT /* alternate name for backward compatibility */ -#define INTCLEAR 227 -#define INTENABLE 228 -#define PS 230 -#define VECBASE 231 -#define EXCCAUSE 232 -#define DEBUGCAUSE 233 -#define CCOUNT 234 -#define PRID 235 -#define ICOUNT 236 -#define ICOUNTLEVEL 237 -#define EXCVADDR 238 -#define CCOMPARE_0 240 -#define CCOMPARE_1 241 -#define CCOMPARE_2 242 -#define MISC_REG_0 244 -#define MISC_REG_1 245 -#define MISC_REG_2 246 -#define MISC_REG_3 247 - -/* Special cases (bases of special register series): */ -#define MR 32 -#define IBREAKA 128 -#define DBREAKA 144 -#define DBREAKC 160 -#define EPC 176 -#define EPS 192 -#define EXCSAVE 208 -#define CCOMPARE 240 -#define MISC_REG 244 - -/* Tensilica-defined user registers: */ -#if 0 -/*#define ... 21..24 */ /* (545CK) */ -/*#define ... 140..143 */ /* (545CK) */ -#define EXPSTATE 230 /* Diamond */ -#define THREADPTR 231 /* threadptr option */ -#define FCR 232 /* FPU */ -#define FSR 233 /* FPU */ -#define AE_OVF_SAR 240 /* HiFi2 */ -#define AE_BITHEAD 241 /* HiFi2 */ -#define AE_TS_FTS_BU_BP 242 /* HiFi2 */ -#define AE_SD_NO 243 /* HiFi2 */ -#define VSAR 240 /* VectraLX */ -#define ROUND_LO 242 /* VectraLX */ -#define ROUND_HI 243 /* VectraLX */ -#define CBEGIN 246 /* VectraLX */ -#define CEND 247 /* VectraLX */ -#endif - -#endif /* XTENSA_SPECREG_H */ - diff --git a/src/arch/xtensa/include/xtensa/tie/xt_virtualops.h b/src/arch/xtensa/include/xtensa/tie/xt_virtualops.h deleted file mode 100644 index 45ff64f29791..000000000000 --- a/src/arch/xtensa/include/xtensa/tie/xt_virtualops.h +++ /dev/null @@ -1,37 +0,0 @@ -// Customer ID=10631; Build=0x90af6; Copyright (c) 2017-2019 Cadence Design Systems, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -/* Do not modify. This is automatically generated.*/ - -/* parasoft-begin-suppress ALL "This file not MISRA checked." */ - -#ifndef _XTENSA_xt_virtualops_h_HEADER -#define _XTENSA_xt_virtualops_h_HEADER - - -/* Header includes start */ - - -/* Header includes end */ - -#endif /* !_XTENSA_xt_virtualops_h_HEADER */ - -/* parasoft-end-suppress ALL "This file not MISRA checked." */ diff --git a/src/arch/xtensa/include/xtensa/trax-api.h b/src/arch/xtensa/include/xtensa/trax-api.h deleted file mode 100644 index aa1584359bc4..000000000000 --- a/src/arch/xtensa/include/xtensa/trax-api.h +++ /dev/null @@ -1,93 +0,0 @@ -/* Misc TRAX API function definitions. - - Copyright (c) 2007-2012 Tensilica Inc. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - -#ifndef _TRAX_API_H_ -#define _TRAX_API_H_ - -#include -#include -#include "tpack.h" -#include "traxreg.h" - -#include "xdm-regs.h" - -/* Flags for trax_stop(): */ -#define TRAX_STOP_HALT 0x0001 /* halt immediately, don't wait for post-stop-trigger capture */ -#define TRAX_STOP_QUIET 0x0002 /* don't display informative messages */ - - -/* - * Describes a TRAX channel (based on tpack). 
- */ -typedef struct { - tpack_channel chan; /* channel structure header */ - /* Per TRAX unit information: */ - int trax_version; /* TRAX_ID_VER(id), one of TRAX_VER_xxx macros */ - unsigned long trax_tram_size; /* size of trace RAM in bytes */ - int trax_erratum10; /* set if TRAX 1.0 erratum workarounds needed */ - int trax_erratum20; /* set if TRAX 2.0 erratum workaround needed (PR 22161)*/ - int trax_erratum20_size; - int trax_has_busy; /* has trace-busy feature */ - int trax_has_atb; /* has ATB feature */ - /*FIXME: add various features: coresight regs (don't call it that), APB, ATB, TRAM, ... */ -} trax_channel; - - -/* Prototypes: */ - -/* TRAX Protocol API: */ -extern int trax_read_register(tpack_channel *tchan, int regno, unsigned *value); -extern int trax_write_register(tpack_channel *tchan, int regno, unsigned value); -extern int trax_read_memory(tpack_channel *tchan, int address, int size, unsigned char *pdata); -extern int trax_fill_memory(tpack_channel *tchan, int address, int size, tpack_u32 pattern); -extern int trax_enumerate_devices(tpack_channel *tchan, int * buf, int * size); - -/* TRAX Network API: */ -extern unsigned long trax_ram_size(tpack_channel *traxchan); -extern unsigned long trax_ram_size_addr(tpack_channel *traxchan); -extern int trax_create_tracefile(tpack_channel *traxchan, int size, unsigned char * data, - char *filename, int hflags, const char *toolver); -extern int trax_memaccess_safe(tpack_channel *traxchan, const char *what); -extern int trax_start(tpack_channel *traxchan, int flags); -extern int trax_stop(tpack_channel *traxchan, int flags); -extern int trax_halt(tpack_channel *traxchan, int flags); -extern int trax_save(tpack_channel *traxchan, char *filename, int flags, const char *toolver, int erratum); - -/* TRAX Misc API (no network dependencies): */ -int trax_fixed_hw(unsigned * regs); -extern int trax_display_id(unsigned id, const char *prefix); -extern int trax_display_summary(unsigned id, - unsigned status, - unsigned control, - unsigned address, - unsigned delay, - unsigned trigger, - unsigned match, - unsigned startaddr, - unsigned endaddr, - const char *prefix); - -/* Other: */ - -#endif /* _TRAX_API_H_ */ - diff --git a/src/arch/xtensa/include/xtensa/trax-core-config.h b/src/arch/xtensa/include/xtensa/trax-core-config.h deleted file mode 100644 index 42a03334aa17..000000000000 --- a/src/arch/xtensa/include/xtensa/trax-core-config.h +++ /dev/null @@ -1,144 +0,0 @@ -/* Definitions for Xtensa processor config info needed for TRAX. - - Copyright (c) 2005-2011 Tensilica Inc. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - -#ifndef TRAX_CORE_CONFIG_H -#define TRAX_CORE_CONFIG_H - -#include "xtensa-params.h" - -/* - * Vector Enumerations. - */ - -/* These must match the LX2.0 and later traceport spec: */ -#define VEC_NO_VECTOR 0 -#define VEC_FIRST VEC_RESET /* first valid vector */ -#define VEC_RESET 1 -#define VEC_DEBUG 2 -#define VEC_NMI 3 -#define VEC_USER 4 -#define VEC_KERNEL 5 -#define VEC_DOUBLE 6 -#define VEC_MEMERR 7 -#define VEC_RESERVED8 8 -#define VEC_RESERVED9 9 -#define VEC_WINO4 10 -#define VEC_WINU4 11 -#define VEC_WINO8 12 -#define VEC_WINU8 13 -#define VEC_WINO12 14 -#define VEC_WINU12 15 -#define VEC_INTLEVEL2 16 -#define VEC_INTLEVEL3 17 -#define VEC_INTLEVEL4 18 -#define VEC_INTLEVEL5 19 -#define VEC_INTLEVEL6 20 -/* These are internal, i.e. don't appear like this on traceport: */ -#define VEC_DEBUG_OCD 21 -#define VEC_UNKNOWN 22 -/* Enumerations 23 through 31 are also reserved, but putting */ -/* placeholders here seems wasteful and unnecessary. */ -#define VEC_COUNT 23 - -/* Other branch (change-of-PC-flow) type encodings; - * if PC changes due to an exception or interrupt vector, - * one of the VEC_* values above is used, otherwise - * (or if it's unknown whether it's due to an exception/interrupt) - * one of the following is used: */ - -#define BRANCH_IS_VEC(n) ((n) < VEC_COUNT) /* is known to be except/interrupt? */ -#define BRANCH_OR_VEC 24 /* unknown type of branch (branch/exception/interrupt/etc) */ -#define BRANCH_UNKNOWN 25 /* unknown type of branch (anything but except/interrupt) */ -#define BRANCH_UNKNOWN_ERR 26 /* like BRANCH_UNKNOWN with known error (non-branch instr) */ -#define BRANCH_LOOPBACK 28 /* zero-overhead loopback (from LEND to LBEG) */ -#define BRANCH_CONDTAKEN 29 /* conditional branch taken (or LOOP{NEZ,GTZ} loop skip) */ -#define BRANCH_JUMP 30 /* jump (unconditional branch, i.e. J or JX) */ -#define BRANCH_IS_CALL(n) (((n) & ~3) == 32) /* is a function call? */ -#define BRANCH_CALL0 32 /* non-windowed function call (CALL0, CALLX0) */ -#define BRANCH_CALL4 33 /* windowed function call (CALL4, CALLX4) */ -#define BRANCH_CALL8 34 /* windowed function call (CALL8, CALLX8) */ -#define BRANCH_CALL12 35 /* windowed function call (CALL12, CALLX12) */ -#define BRANCH_IS_RETURN(n) ((n) >= 36) /* is any kind of return? */ -#define BRANCH_IS_CALLRETURN(n) (((n) & ~1) == 36) /* is a function return? */ -#define BRANCH_RET 36 /* non-windowed function return (RET or RET.N) */ -#define BRANCH_RETW 37 /* windowed function return (RETW or RETW.N) */ -#define BRANCH_IS_EIRETURN(n) ((n) >= 38) /* is an except/inter. return? 
*/ -#define BRANCH_RFE 38 /* RFE or RFUE */ -#define BRANCH_RFDE 39 /* RFDE */ -#define BRANCH_RFWO 40 /* RFWO */ -#define BRANCH_RFWU 41 /* RFWU */ -#define BRANCH_RFI_2 42 /* RFI 2 */ -#define BRANCH_RFI_3 43 /* RFI 3 */ -#define BRANCH_RFI_4 44 /* RFI 4 */ -#define BRANCH_RFI_5 45 /* RFI 5 */ -#define BRANCH_RFI_6 46 /* RFI 6 */ -#define BRANCH_RFI_NMI 47 /* RFI NMILEVEL */ -#define BRANCH_RFI_DEBUG 48 /* RFI DEBUGLEVEL */ -#define BRANCH_RFME 49 /* RFME */ -#define BRANCH_COUNT 50 /* (number of defined BRANCH_xxx values) */ - - - -typedef struct { - unsigned vaddr; - unsigned vaddr2; /* for static vectors only (reloc vectors option) */ - int is_configured; -} trax_vector_t; - - -/* - * This structure describes those portion of a Tensilica processor's - * configuration that are useful for trace. - */ -typedef struct { - char ** isa_dlls; - char * core_name; /* (XPG core name, not necessarily same as XTENSA_CORE) */ - int big_endian; /* 0 = little-endian, 1 = big-endian */ - int has_loops; /* 1 = zero overhead loops configured */ - int has_autorefill; /* 1 = TLB autorefill (MMU) configured */ - unsigned max_instr_size; /* in bytes (eg. 3, 4, 8, ...) */ - unsigned int_level_max; /* number of interrupt levels configured (without NMI) */ - int debug_level; /* debug intlevel, 0 if debug not configured */ - int nmi_level; /* NMI intlevel, 0 if NMI not configured */ - unsigned targethw_min; /* min. targeted hardware version (XTENSA_HWVERSION_) */ - unsigned targethw_max; /* max. targeted hardware version (XTENSA_HWVERSION_) */ - int reloc_vectors; /* 0 = fixed vectors, 1 = relocatable vectors */ - int statvec_select; /* 0 = stat vec base 0, 1 = stat vec base 1 (SW default) */ - int vecbase_align; /* number of bits to align VECBASE (32 - bits in VECBASE) */ - unsigned statvec_base0; /* static vector base 0 */ - unsigned statvec_base1; /* static vector base 1 */ - unsigned vecbase_reset; /* reset value of VECBASE */ - trax_vector_t vectors[VEC_COUNT]; /* all vectors... */ -} trax_core_config_t; - - -/* Globals: */ -//extern const char * const trax_vector_short_names[/*VEC_COUNT*/]; // nobody uses this one -extern const char * const trax_vector_names[/*VEC_COUNT*/]; - -/* Prototypes: */ -extern int trax_read_params (trax_core_config_t *c, xtensa_params p); -extern int trax_vector_from_address(trax_core_config_t *config, unsigned long vaddr, unsigned long *vecbases); - -#endif /* TRAX_CORE_CONFIG_H */ - diff --git a/src/arch/xtensa/include/xtensa/trax-proto.h b/src/arch/xtensa/include/xtensa/trax-proto.h deleted file mode 100644 index 41d5c9fd769d..000000000000 --- a/src/arch/xtensa/include/xtensa/trax-proto.h +++ /dev/null @@ -1,91 +0,0 @@ -/* This file contains functions that are hidden from the user. These are - * protocol specific functions used to read and write TRAX registers - * and the trace memory - */ - -/* - * Copyright (c) 2012-2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef _TRAX_PROTO_H -#define _TRAX_PROTO_H - -#ifdef __cplusplus -extern "C" { -#endif - -/* Function to read register - * - * regno : The register number to be read (not ERI addressed) - * data : Location where the read value is kept - * - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_read_register_eri (int regno, unsigned *data); - -/* Function to write a value into a register - * - * regno : The register number to be written (not ERI addressed) - * value : The value to be written at that register location - * - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_write_register_eri (int regno, unsigned value); - -/* Function to read memory - * - * address : Address of the TraceRAM memory, each location has a word - * len : Amount of memory in bytes, to be read - * data : buffer in which the read memory is stored - * final_address: Next address to be read in the following call to this - * function (trace save mechanism) - * - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_read_memory_eri (unsigned address, int len, int *data, - unsigned *final_address); - -/* Function to write a value to the memory address - * - * address : Address of the TraceRAM memory - * value : The value to be written inside that location - * - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_write_memory_eri (int address, unsigned value); - -/* Function to write to a subfield of the register. - * Called by set and show parameter functions. - * - * regno : Register number - * regmask : Mask in order to toggle appropriate bits - * value : Value to be written in the masked location - * - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_write_register_field_eri (int regno, unsigned regmask, - unsigned value); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/arch/xtensa/include/xtensa/trax-util.h b/src/arch/xtensa/include/xtensa/trax-util.h deleted file mode 100644 index 123ac366dfb4..000000000000 --- a/src/arch/xtensa/include/xtensa/trax-util.h +++ /dev/null @@ -1,63 +0,0 @@ -/* This file contains utility functions that can be used for polling TRAX - * or executing higher level save functionality - * It assumes that print subroutines and file I/O routines are available - * on the system - */ - -/* - * Copyright (c) 2012-2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _TRAX_UTIL_H -#define _TRAX_UTIL_H - - -#ifdef __cplusplus -extern "C" { -#endif - -/* User can use this function if he wants to generate a tracefile output. - * Internally it calls trax_get_trace in a loop until it realizes that - * the entire trace has been read. - * - * context : pointer to structure which contains information about the - * current TRAX session - * filename : user specified output trace file name. If the file does not - * exist, it would create the new file, else would append to it - * - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_save (trax_context *context, char *filename); - -/* Displays a brief machine readable status. - * - * context : pointer to structure which contains information about the - * current TRAX session - * returns : 0 if successful, -1 if unsuccessful - */ -int trax_poll (trax_context *context); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/src/arch/xtensa/include/xtensa/trax.h b/src/arch/xtensa/include/xtensa/trax.h deleted file mode 100644 index 47049c51d215..000000000000 --- a/src/arch/xtensa/include/xtensa/trax.h +++ /dev/null @@ -1,409 +0,0 @@ -/* Header file for TRAX control Library */ - -/* - * Copyright (c) 2012-2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef _TRAX_H -#define _TRAX_H - -#ifdef __cplusplus -extern "C" { -#endif - -#define TRAX_STOP_HALT 0x0001 -#define TRAX_STOP_QUIET 0x0002 - -/* Flag values to indicate if the user wanted to reverse the pcstop - * parameters */ -#define TRAX_PCSTOP_REVERSE 0x0001 -#define TRAX_PCSTOP_NO_REVERSE 0x0000 - -/* Indicating whether postsize should be in terms of bytes, instructions - * or percentage of trace size captured */ -#define TRAX_POSTSIZE_BYTES 0x0000 -#define TRAX_POSTSIZE_INSTR 0x0001 -#define TRAX_POSTSIZE_PERCENT 0x0002 - -/* Size of the header inside the trace file */ -#define TRAX_HEADER_SIZE 256 - -/* Minimum size between start and end addresses */ -#define TRAX_MIN_TRACEMEM 64 - -/* For basic debugging */ -#define DEBUG 0 - -#include - -#define ffs(i) __builtin_ffs(i) - -/* Data structures */ - -/* Represents the context of the TRAX unit and the current TRAX session. - * To be used by set and show function calls to set and show appropriate - * parameters of appropriate TRAX unit. - */ - -typedef struct { - int trax_version; /* TRAX PC version information */ - unsigned long trax_tram_size; /* If trace RAM is present,size of it */ - int hflags; /* Flags that can be used to debug, - print info, etc. */ - int address_read_last; /* During saving of the trace, this - indicates the address from which - the current trace reading must - resume */ - unsigned long bytes_read; /* bytes read uptil now */ - unsigned long total_memlen; /* Total bytes to be read based on the - trace collected in the trace RAM */ - bool get_trace_started; /* indicates that the first chunk of - bytes (which include the header) has - been read */ -} trax_context; - - -/* -----------------------TRAX Initialization ------------------------------*/ - -/* Initializing the trax context. Reads registers and sets values for version, - * trace RAM size, total memory length, etc. Most of the other values are - * initialized to their default case. - * - * context : pointer to structure which contains information about the - * current TRAX session - * - * returns : 0 if successful, -1 if unsuccessful, -2 if ram_size if - * incorrect - */ -int trax_context_init_eri (trax_context *context); - -/* -----------------Starting/Stopping TRAX session -------------------------*/ - -/* Start tracing with current parameter setting. If tracing is already in - * progress, an error is reported. Otherwise, tracing starts and any unsaved - * contents of the TraceRAM is discarded - * - * context : pointer to structure which contains information about the - * current TRAX session - * returns : 0 if successful, 1 if trace is already active, - * -1 if unsuccessful - */ -int trax_start (trax_context *context); - -/* This command initiates a stop trigger or halts a trace session based of the - * value of the flag parameter passed. In case stop trigger is initiated, any - * selected post-stop-trigger capture proceeds normally. - * If trace capture was not in progress, or a stop was already triggered, the - * return value indicates appropriately. - * - * context : pointer to structure which contains information about the - * current TRAX session - * flags : To differentiate between stopping trace without any - * post-size-trigger capture (trax_halt) or with that. 
- * A zero value would stop the trace based on trigger and a
- * value of one would halt it
- *
- * returns : 0 if successful, 1 if already stopped, -1 if unsuccessful
- */
-int trax_stop_halt (trax_context *context, int flags);
-
-/* Resets the TRAX parameters to their default values which would internally
- * involve resetting the TRAX registers. To invoke another trace session or
- * reset the current tracing mechanism, this function needs to be called as
- * it resets parameters of the context that deal with tracing information
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- *
- * returns : 0 if successful, -1 if unsuccessful
- */
-int trax_reset (trax_context *context);
-
-/* ---------------Set/Get several TRAX parameters --------------------------*/
-
-/* Sets the start address and end address (word aligned) of the trace in the
- * TraceRAM. Care must be taken to ensure that the difference between the
- * start and the end addresses is at least TRAX_MIN_TRACEMEM bytes. If not,
- * the values are reset to default, which is 0 for startaddr and
- * traceRAM_words - 1 for endaddr
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * startaddr : value to which the start address must be set. Can be
- * any value between 0 - (traceRAM_words - 1)
- * endaddr : value to which the end address must be set. Can be any value
- * between 0 - (traceRAM_words - 1)
- *
- * returns : 0 if successful, -1 if unsuccessful, -2 if the difference
- * between the start and end addresses is less than
- * TRAX_MIN_TRACEMEM bytes or if they are passed incorrect
- * values, -3 if memory shared option is not configured, in
- * which case, start and end addresses are set to default
- * values instead of those passed by the user
- */
-int trax_set_ram_boundaries (trax_context *context, unsigned startaddr,
- unsigned endaddr);
-
-/* Shows the start address and end address (word aligned) of the trace in the
- * TraceRAM. If incorrect, the startaddress and the endaddress values are
- * set to default, i.e.
0 for startaddr and traceRAM_words - 1 for endaddr
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * startaddr : pointer to value which will contain the start address
- * endaddr : pointer to value which will contain the end address
- *
- * returns : 0 if successful, -1 if unsuccessful
- *
- */
-int trax_get_ram_boundaries (trax_context *context, unsigned *startaddr,
- unsigned *endaddr);
-
-/* Selects stop trigger via cross-trigger input
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * value : 0 = off (reset value), 1 = on
- *
- * returns : 0 if successful, -1 if unsuccessful
- */
-int trax_set_ctistop (trax_context *context, unsigned value);
-
-/* Shows if stop-trigger via cross-trigger input is off or on
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * returns : 0 if off, 1 if on, -1 if unsuccessful
- */
-int trax_get_ctistop (trax_context *context);
-
-/* Selects stop trigger via processor-trigger input
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * value : 0 = off (reset value), 1 = on
- *
- * returns : 0 if successful, -1 if unsuccessful
- */
-int trax_set_ptistop (trax_context *context, unsigned value);
-
-/* Shows if stop trigger via processor-trigger input is off or on
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * returns : 0 if off, 1 if on, -1 if unsuccessful
- */
-int trax_get_ptistop (trax_context *context);
-
-/* Reports cross trigger output state
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * returns : 0 if CTO bit is reset, 1 if CTO bit is set
- */
-int trax_get_cto (trax_context *context);
-
-/* Reports processor trigger output state
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * returns : 0 if PTO bit is reset, 1 if PTO bit is set
- */
-int trax_get_pto (trax_context *context);
-
-/* Selects condition that asserts cross trigger output
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * option : 0 = off(reset value)/1 = ontrig/2 = onhalt
- *
- * returns : 0 if successful, -1 if unsuccessful
- */
-int trax_set_ctowhen (trax_context *context, int option);
-
-/* Shows condition that asserted cross trigger output. It can be
- * any of: ontrig or onhalt or even off
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- *
- * returns : 0 if off, 1 if ontrig, 2 if onhalt, -1 if unsuccessful
- */
-int trax_get_ctowhen (trax_context *context);
-
-/* Selects condition that asserts processor trigger output
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * option : 0 = off(reset value)/1 = ontrig/2 = onhalt
- *
- * returns : 0 if successful, -1 if unsuccessful
- */
-int trax_set_ptowhen (trax_context *context, int option);
-
-
-/* Shows condition that asserted processor trigger output. It can be
- * any of: ontrig or onhalt or even off
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * returns : 0 if off, 1 if ontrig, 2 if onhalt, -1 if unsuccessful
- */
-int trax_get_ptowhen (trax_context *context);
-
-/* Selects the trace synchronization message period.
- * If ATEN enabled, we cannot allow syncper to be off, set it to reset value.
- * Also, if no trace RAM, and ATEN enabled, set syncper to be reset value
- * i.e. 256. A value of 1 i.e. on indicates that internally the message
- * frequency is set to an optimal value. This option should be preferred
- * if the user is not sure what message frequency option to set for the
- * trace session.
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * option : 0 = off, 1 = on, -1 = auto, 8, 16, 32, 64, 128,
- * 256 (reset value)
- *
- * returns : 0 if successful, -1 if unsuccessful, -2 if incorrect
- * arguments
- */
-int trax_set_syncper (trax_context *context, int option);
-
-/* Shows trace synchronization message period. Can be one of:
- * off, on, auto, 8, 16, 32, 64, 128, 256 (reset value)
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * returns : value of sync period, 0 if off, -1 if unsuccessful
- */
-int trax_get_syncper (trax_context *context);
-
-/* Selects stop trigger via PC match. Specifies the address or
- * address range to match against program counter. Trace stops when the
- * processor executes an instruction matching the specified address
- * or range.
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * index : indicates the number of stop trigger (currently there is
- * only one i.e. index = 0)
- * startaddress : start range of the address at which the stop trigger
- * should be activated
- * endaddress : end range of the address at which the stop trigger should
- * be activated
- * flags : If non-zero, this inverts the range. i.e. trace stops
- * when the processor executes an instruction that does not
- * match the specified address or range
- *
- * returns : 0 if successful, -1 if unsuccessful, -2 if incorrect
- * arguments (unaligned)
- *
- * Note : For the current version of TRAX library, the endaddress and
- * startaddress can differ by at most 31 bytes and the total
- * range i.e. (endaddress - startaddress + 1) has to be a power
- * of two
- */
-int trax_set_pcstop (trax_context *context, int index, unsigned long startaddress,
- unsigned long endaddress, int flags);
-
-/* Shows the stop trigger via PC match
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * index : container of information about the number of stop triggers
- * startaddress : container of start range of stop trigger
- * endaddress : container of end range of stop trigger
- * flags : container of information which indicates whether the
- * pc stop range is inverted or not.
- *
- * returns : 0 if successful, -1 if unsuccessful
- */
-int trax_get_pcstop (trax_context *context, int *index,
- unsigned long *startaddress,
- unsigned long *endaddress, int *flags);
-
-/* This function is used to set the amount of trace to be captured past
- * the stop trigger.
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * count_unit : contains the count of units (instructions or bytes) to be
- * captured post trigger. If 0, it implies that this is off
- * unit : unit of measuring the count.
0 is bytes, 1 is instructions
- * 2 is percentage of trace
- *
- * returns : 0 if successful, -1 if unsuccessful, -2 if incorrect
- * arguments
- *
- */
-int trax_set_postsize (trax_context *context, int count_unit, int unit);
-
-/* This function shows the amount of TraceRAM in terms of the number of
- * instructions or bytes, captured post the stop trigger
- *
- * context : pointer to structure which contains information about the
- * current TRAX session
- * count_unit : will contain the count of units (instructions or bytes) post
- * trigger
- * unit : will contain information about the events that are counted
- * 0 implies that the traceRAM words consumed are counted and
- * 1 implies that the target processor instructions executed and
- * exceptions/interrupts taken are counted
- *
- * returns : 0 if postsize was read successfully, -1 if unsuccessful
- */
-int trax_get_postsize (trax_context *context, int *count_unit, int *unit);
-
-/* -------------------------- TRAX save routines ---------------------------*/
-
-/* This function should be called by the user to return a chunk of
- * bytes in buf. It can be a lower layer function of save, or can be
- * called by the user explicitly. If bytes_actually_read contains a 0
- * after a call to this function has been made, it implies that the entire
- * trace has been read successfully.
- *
- * context : pointer to structure which contains information about
- * the current TRAX session
- * buf : Buffer that is allocated by the user, all the trace
- * data read would be put in this buffer, which can then
- * be used to generate a tracefile.
- * The first TRAX_HEADER_SIZE of the buffer will always
- * contain the header information.
- * bytes_to_be_read : Indicates the bytes the user wants to read. The first
- * invocation would need this parameter to be
- * TRAX_HEADER_SIZE at least.
- *
- * returns : bytes actually read during the call to this function.
- * 0 implies that all the bytes in the trace have been
- * read, -1 if unsuccessful read/write of
- * registers or memory, -2 if trace was active while
- * this function was called, -3 if user enters
- * bytes_to_be_read < TRAX_HEADER_SIZE in the first
- * pass
- */
-int trax_get_trace (trax_context *context, void *buf,
- int bytes_to_be_read);
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _TRAX_H */
diff --git a/src/arch/xtensa/include/xtensa/traxfile.h b/src/arch/xtensa/include/xtensa/traxfile.h
deleted file mode 100644
index 4afc926a5076..000000000000
--- a/src/arch/xtensa/include/xtensa/traxfile.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* TRAX file header definition.
-
- Copyright (c) 2007-2012 Tensilica Inc.
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be included
- in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - - -#define TRAX_FHEAD_MAGIC "TRAXdmp" -#define TRAX_FHEAD_VERSION 1 - -/* Header flags: */ -#define TRAX_FHEADF_OCD_ENABLED 0x00000001 /* set if OCD was enabled while capturing trace */ -#define TRAX_FHEADF_TESTDUMP 0x00000002 /* set if is a test file - (from 'memsave' instead of 'save') */ -#define TRAX_FHEADF_OCD_ENABLED_WHILE_EXIT 0x00000004 /* set if OCD was enabled while capturing trace and - we were exiting the OCD mode */ - -/* Header at the start of a TRAX dump file. */ -typedef struct { - char magic[8]; /* 00: "TRAXdmp\0" (TRAX_FHEAD_MAGIC) */ - char endianness; /* 08: 0=little-endian, 1=big-endian */ - char version; /* 09: TRAX_FHEAD_VERSION */ - char reserved0[2]; /* 0A: ... */ - unsigned filesize; /* 0C: size of the trace file, including this header */ - unsigned trace_ofs; /* 10: start of trace output, byte offset from start of header */ - unsigned trace_size; /* 14: size of trace output in bytes */ - unsigned dumptime; /* 18: date/time of capture save (secs since 1970-01-01), 0 if unknown */ - unsigned flags; /* 1C: misc flags (TRAX_FHEAD_F_xxx) */ - char username[16]; /* 20: user doing the capture/save (up to 15 chars) */ - char toolver[24]; /* 30: tool + version used for capture/save (up to 23 chars) */ - char reserved2[40]; /* 48: (reserved - could be hostname used for dump (up to 39 chars)) */ - unsigned configid[2]; /* 70: processor ConfigID values, 0 if unknown */ - unsigned ts_freq; /* 78: timestamp frequency, 0 if not specified */ - unsigned reserved3; /* 7C: (reserved) */ - unsigned id; /* 80: TRAX registers at time of save (0 if not read) */ - unsigned control; - unsigned status; - unsigned reserved4; /* Data register (should not be read) */ - unsigned address; - unsigned trigger; - unsigned match; - unsigned delay; - unsigned trax_regs[24]; /*100: (total size) -- dummy allocation (FIXME) */ -} trax_file_header; - diff --git a/src/arch/xtensa/include/xtensa/traxreg.h b/src/arch/xtensa/include/xtensa/traxreg.h deleted file mode 100644 index 282ba1feef21..000000000000 --- a/src/arch/xtensa/include/xtensa/traxreg.h +++ /dev/null @@ -1,199 +0,0 @@ -/* TRAX register definitions - - Copyright (c) 2006-2012 Tensilica Inc. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ - -#ifndef _TRAX_REGISTERS_H_ -#define _TRAX_REGISTERS_H_ - -#define SHOW 1 -#define HIDE 0 - -#define RO 0 -#define RW 1 - -/* TRAX Register Numbers (from possible range of 0..127) */ -#if 0 -#define TRAXREG_ID 0 -#define TRAXREG_CONTROL 1 -#define TRAXREG_STATUS 2 -#define TRAXREG_DATA 3 -#define TRAXREG_ADDRESS 4 -#define TRAXREG_TRIGGER 5 -#define TRAXREG_MATCH 6 -#define TRAXREG_DELAY 7 -#define TRAXREG_STARTADDR 8 -#define TRAXREG_ENDADDR 9 -/* Internal use only (unpublished): */ -#define TRAXREG_P4CHANGE 16 -#define TRAXREG_P4REV 17 -#define TRAXREG_P4DATE 18 -#define TRAXREG_P4TIME 19 -#define TRAXREG_PDSTATUS 20 -#define TRAXREG_PDDATA 21 -#define TRAXREG_STOP_PC 22 -#define TRAXREG_STOP_ICNT 23 -#define TRAXREG_MSG_STATUS 24 -#define TRAXREG_FSM_STATUS 25 -#define TRAXREG_IB_STATUS 26 -#define TRAXREG_MAX 27 -#define TRAXREG_ITCTRL 96 -#endif -/* The registers above match the NAR addresses. So, their values are used for NAR access */ - -/* TRAX Register Fields */ - -/* TRAX ID register fields: */ -#define TRAX_ID_PRODNO 0xf0000000 /* product number (0=TRAX) */ -#define TRAX_ID_PRODOPT 0x0f000000 /* product options */ -#define TRAX_ID_MIW64 0x08000000 /* opt: instruction width */ -#define TRAX_ID_AMTRAX 0x04000000 /* opt: collection of options, - internal (VER_2_0 or later)*/ -#define TRAX_ID_MAJVER(id) (((id) >> 20) & 0x0f) -#define TRAX_ID_MINVER(id) (((id) >> 17) & 0x07) -#define TRAX_ID_VER(id) ((TRAX_ID_MAJVER(id)<<4)|TRAX_ID_MINVER(id)) -#define TRAX_ID_STDCFG 0x00010000 /* standard config */ -#define TRAX_ID_CFGID 0x0000ffff /* TRAX configuration ID */ -#define TRAX_ID_MEMSHARED 0x00001000 /* Memshared option in TRAX */ -#define TRAX_ID_FROM_VER(ver) ((((ver) & 0xf0) << 16) | (((ver) & 0x7) << 17)) -/* Other TRAX ID register macros: */ -/* TRAX versions of interest (TRAX_ID_VER(), ie. 
MAJVER*16 + MINVER): */ -#define TRAX_VER_1_0 0x10 /* RA */ -#define TRAX_VER_1_1 0x11 /* RB thru RC-2010.1 */ -#define TRAX_VER_2_0 0x20 /* RC-2010.2, RD-2010.0, - RD-2011.1 */ -#define TRAX_VER_2_1 0x21 /* RC-2011.3 / RD-2011.2 and - later */ -#define TRAX_VER_3_0 0x30 /* RE-2012.0 */ -#define TRAX_VER_3_1 0x31 /* RE-2012.1 */ -#define TRAX_VER_HUAWEI_3 TRAX_VER_3_0 /* For Huawei, PRs: 25223, 25224 - , 24880 */ - - -/* TRAX version 1.0 requires a couple software workarounds: */ -#define TRAX_ID_1_0_ERRATUM(id) (TRAX_ID_VER(id) == TRAX_VER_1_0) -/* TRAX version 2.0 requires software workaround for PR 22161: */ -#define TRAX_ID_MEMSZ_ERRATUM(id) (TRAX_ID_VER(id) == TRAX_VER_2_0) - -/* TRAX Control register fields: */ -#define TRAX_CONTROL_TREN 0x00000001 -#define TRAX_CONTROL_TRSTP 0x00000002 -#define TRAX_CONTROL_PCMEN 0x00000004 -#define TRAX_CONTROL_PTIEN 0x00000010 -#define TRAX_CONTROL_CTIEN 0x00000020 -#define TRAX_CONTROL_TMEN 0x00000080 /* 2.0+ */ -#define TRAX_CONTROL_CNTU 0x00000200 -#define TRAX_CONTROL_BIEN 0x00000400 -#define TRAX_CONTROL_BOEN 0x00000800 -#define TRAX_CONTROL_TSEN 0x00000800 -#define TRAX_CONTROL_SMPER 0x00007000 -#define TRAX_CONTROL_SMPER_SHIFT 12 -#define TRAX_CONTROL_PTOWT 0x00010000 -#define TRAX_CONTROL_CTOWT 0x00020000 -#define TRAX_CONTROL_PTOWS 0x00100000 -#define TRAX_CONTROL_CTOWS 0x00200000 -#define TRAX_CONTROL_ATID 0x7F000000 /* 2.0+, amtrax */ -#define TRAX_CONTROL_ATID_SHIFT 24 -#define TRAX_CONTROL_ATEN 0x80000000 /* 2.0+, amtrax */ - -#define TRAX_CONTROL_PTOWS_ER 0x00020000 /* For 3.0 */ -#define TRAX_CONTROL_CTOWT_ER 0x00100000 /* For 3.0 */ - -#define TRAX_CONTROL_ITCTO 0x00400000 /* For 3.0 */ -#define TRAX_CONTROL_ITCTIA 0x00800000 /* For 3.0 */ -#define TRAX_CONTROL_ITATV 0x01000000 /* For 3.0 */ - - -/* TRAX Status register fields: */ -#define TRAX_STATUS_TRACT 0x00000001 -#define TRAX_STATUS_TRIG 0x00000002 -#define TRAX_STATUS_PCMTG 0x00000004 -#define TRAX_STATUS_BUSY 0x00000008 /* ER ??? */ -#define TRAX_STATUS_PTITG 0x00000010 -#define TRAX_STATUS_CTITG 0x00000020 -#define TRAX_STATUS_MEMSZ 0x00001F00 -#define TRAX_STATUS_MEMSZ_SHIFT 8 -#define TRAX_STATUS_PTO 0x00010000 -#define TRAX_STATUS_CTO 0x00020000 - -#define TRAX_STATUS_ITCTOA 0x00400000 /* For 3.0 */ -#define TRAX_STATUS_ITCTI 0x00800000 /* For 3.0 */ -#define TRAX_STATUS_ITATR 0x01000000 /* For 3.0 */ - - -/* TRAX Address register fields: */ -#define TRAX_ADDRESS_TWSAT 0x80000000 -#define TRAX_ADDRESS_TWSAT_SHIFT 31 -#define TRAX_ADDRESS_TOTALMASK 0x00FFFFFF -// !!! VUakiVU. added for new TRAX: -#define TRAX_ADDRESS_WRAPCNT 0x7FE00000 /* version ???... 
*/ -#define TRAX_ADDRESS_WRAP_SHIFT 21 - -/* TRAX PCMatch register fields: */ -#define TRAX_PCMATCH_PCML 0x0000001F -#define TRAX_PCMATCH_PCML_SHIFT 0 -#define TRAX_PCMATCH_PCMS 0x80000000 - -/* Compute trace ram buffer size (in bytes) from status register: */ -#define TRAX_MEM_SIZE(status) (1L << (((status) & TRAX_STATUS_MEMSZ) >> TRAX_STATUS_MEMSZ_SHIFT)) - -#if 0 -/* Describes a field within a register: */ -typedef struct { - const char* name; -// unsigned width; -// unsigned shift; - char width; - char shift; - char visible; /* 0 = internal use only, 1 = shown */ - char reserved; -} trax_regfield_t; -#endif - -/* Describes a TRAX register: */ -typedef struct { - const char* name; - unsigned id; - char width; - char visible; - char writable; - char reserved; - //const trax_regfield_t * fieldset; -} trax_regdef_t; - - -extern const trax_regdef_t trax_reglist[]; -extern const signed char trax_readable_regs[]; - -#ifdef __cplusplus -extern "C" { -#endif - -/* Prototypes: */ -extern int trax_find_reg(char * regname, char **errmsg); -extern const char * trax_regname(int regno); - -#ifdef __cplusplus -} -#endif - -#endif /* _TRAX_REGISTERS_H_ */ - diff --git a/src/arch/xtensa/include/xtensa/uart-16550.h b/src/arch/xtensa/include/xtensa/uart-16550.h deleted file mode 100644 index c551c64c1375..000000000000 --- a/src/arch/xtensa/include/xtensa/uart-16550.h +++ /dev/null @@ -1,152 +0,0 @@ -/******************************************************************************* - - Copyright (c) 2006-2007 Tensilica Inc. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -uart-16550.h Generic definitions for National Semiconductor 16550 UART - -This is used by board-support-packages with one or more 16550 compatible UARTs. -A BSP provides a base address for each instance of a 16550 UART on the board. - -Note that a 16552 DUART (Dual UART) is simply two instances of a 16550 UART. - -*******************************************************************************/ - -#ifndef _UART_16550_H_ -#define _UART_16550_H_ - -/* C interface to UART registers. 
*/ -struct uart_dev_s { - union { - uart16550_reg_t rxb; /* DLAB=0: receive buffer, read-only */ - uart16550_reg_t txb; /* DLAB=0: transmit buffer, write-only */ - uart16550_reg_t dll; /* DLAB=1: divisor, LS byte latch */ - } w0; - union { - uart16550_reg_t ier; /* DLAB=0: interrupt-enable register */ - uart16550_reg_t dlm; /* DLAB=1: divisor, MS byte latch */ - } w1; - - union { - uart16550_reg_t isr; /* DLAB=0: interrupt status register, read-only */ - uart16550_reg_t fcr; /* DLAB=0: FIFO control register, write-only */ - uart16550_reg_t afr; /* DLAB=1: alternate function register */ - } w2; - - uart16550_reg_t lcr; /* line control-register, write-only */ - uart16550_reg_t mcr; /* modem control-regsiter, write-only */ - uart16550_reg_t lsr; /* line status register, read-only */ - uart16550_reg_t msr; /* modem status register, read-only */ - uart16550_reg_t scr; /* scratch regsiter, read/write */ -}; - - -#define _RXB(u) ((u)->w0.rxb) -#define _TXB(u) ((u)->w0.txb) -#define _DLL(u) ((u)->w0.dll) -#define _IER(u) ((u)->w1.ier) -#define _DLM(u) ((u)->w1.dlm) -#define _ISR(u) ((u)->w2.isr) -#define _FCR(u) ((u)->w2.fcr) -#define _AFR(u) ((u)->w2.afr) -#define _LCR(u) ((u)->lcr) -#define _MCR(u) ((u)->mcr) -#define _LSR(u) ((u)->lsr) -#define _MSR(u) ((u)->msr) -#define _SCR(u) ((u)->scr) - -typedef volatile struct uart_dev_s uart_dev_t; - -/* IER bits */ -#define RCVR_DATA_REG_INTENABLE 0x01 -#define XMIT_HOLD_REG_INTENABLE 0x02 -#define RCVR_STATUS_INTENABLE 0x04 -#define MODEM_STATUS_INTENABLE 0x08 - -/* FCR bits */ -#define _FIFO_ENABLE 0x01 -#define RCVR_FIFO_RESET 0x02 -#define XMIT_FIFO_RESET 0x04 -#define DMA_MODE_SELECT 0x08 -#define RCVR_TRIGGER_LSB 0x40 -#define RCVR_TRIGGER_MSB 0x80 - -/* AFR bits */ -#define AFR_CONC_WRITE 0x01 -#define AFR_BAUDOUT_SEL 0x02 -#define AFR_RXRDY_SEL 0x04 - -/* ISR bits */ -#define INT_STATUS(r) ((r)&1) -#define INT_PRIORITY(r) (((r)>>1)&0x7) - -/* LCR bits */ -#define WORD_LENGTH(n) (((n)-5)&0x3) -#define STOP_BIT_ENABLE 0x04 -#define PARITY_ENABLE 0x08 -#define EVEN_PARITY 0x10 -#define FORCE_PARITY 0x20 -#define XMIT_BREAK 0x40 -#define DLAB_ENABLE 0x80 - -/* MCR bits */ -#define _DTR 0x01 -#define _RTS 0x02 -#define _OP1 0x04 -#define _OP2 0x08 -#define LOOP_BACK 0x10 - -/* LSR Bits */ -#define RCVR_DATA_READY 0x01 -#define OVERRUN_ERROR 0x02 -#define PARITY_ERROR 0x04 -#define FRAMING_ERROR 0x08 -#define BREAK_INTERRUPT 0x10 -#define XMIT_HOLD_EMPTY 0x20 -#define XMIT_EMPTY 0x40 -#define FIFO_ERROR 0x80 -#define RCVR_READY(u) (_LSR(u)&RCVR_DATA_READY) -#define XMIT_READY(u) (_LSR(u)&XMIT_HOLD_EMPTY) - -/* MSR bits */ -#define _RDR 0x01 -#define DELTA_DSR 0x02 -#define DELTA_RI 0x04 -#define DELTA_CD 0x08 -#define _CTS 0x10 -#define _DSR 0x20 -#define _RI 0x40 -#define _CD 0x80 - - -/* Compute 16-bit divisor for baudrate generator, with rounding: */ -#define UART_DIVISOR(clock,baud) (((clock)/16 + (baud)/2)/(baud)) - -/* Prototypes of driver functions */ -extern void uart16550_init( uart_dev_t *u, unsigned baud, unsigned ndata, - unsigned parity, unsigned nstop ); -extern void uart16550_out( uart_dev_t *u, char c ); -extern char uart16550_in( uart_dev_t *u ); -extern unsigned uart16550_measure_sys_clk( uart_dev_t *u ); - -#endif /* _UART_16550_H_ */ diff --git a/src/arch/xtensa/include/xtensa/xdm-regs.h b/src/arch/xtensa/include/xtensa/xdm-regs.h deleted file mode 100644 index c220bc96ca65..000000000000 --- a/src/arch/xtensa/include/xtensa/xdm-regs.h +++ /dev/null @@ -1,534 +0,0 @@ -/* xdm-regs.h - Common register and related definitions for the 
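A small worked example of the UART_DIVISOR rounding above, assuming the conventional 1.8432 MHz 16550 input clock; the arithmetic is the whole point, no hardware is involved.

#include <stdio.h>

/* Mirrors the macro defined above: 16x baud divisor with rounding. */
#define UART_DIVISOR(clock, baud) (((clock)/16 + (baud)/2)/(baud))

int main(void)
{
	/* 1843200 / 16 = 115200, so the classic rates divide exactly. */
	printf("115200 baud -> divisor %u\n", UART_DIVISOR(1843200u, 115200u)); /* 1  */
	printf(" 38400 baud -> divisor %u\n", UART_DIVISOR(1843200u, 38400u));  /* 3  */
	printf("  9600 baud -> divisor %u\n", UART_DIVISOR(1843200u, 9600u));   /* 12 */
	return 0;
}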
XDM - (Xtensa Debug Module) */ - -/* Copyright (c) 2016 Cadence Design Systems Inc. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - - -#ifndef _XDM_REGS_H_ -#define _XDM_REGS_H_ - -/* NOTE: This header file is included by C, assembler, and other sources. - So any C-specific or asm-specific content must be appropriately #ifdef'd. */ - - -/* - * XDM registers can be accessed using APB, ERI, or JTAG (via NAR). - * Address offsets for APB and ERI are the same, and for JTAG - * is different (due to the limited 7-bit NAR addressing). - * - * Here, we first provide the constants as APB / ERI address offsets. - * This is necessary for assembler code (which accesses XDM via ERI), - * because complex conversion macros between the two address maps - * don't work in the assembler. - * Conversion macros are used to convert these to/from JTAG (NAR), - * addresses, for software using JTAG. - */ -/* FIXME: maybe provide only MISC+CS registers here, and leave specific - subsystem registers in separate headers? eg. for TRAX, PERF, OCD */ - -/* XDM_.... ERI addr [NAR addr] Description...... 
*/ - -/* TRAX */ -#define XDM_TRAX_ID 0x100000 /*[0x00] ID */ -#define XDM_TRAX_CONTROL 0x100004 /*[0x01] Control */ -#define XDM_TRAX_STATUS 0x100008 /*[0x02] Status */ -#define XDM_TRAX_DATA 0x10000C /*[0x03] Data */ -#define XDM_TRAX_ADDRESS 0x100010 /*[0x04] Address */ -#define XDM_TRAX_TRIGGER 0x100014 /*[0x05] Stop PC */ -#define XDM_TRAX_MATCH 0x100018 /*[0x06] Stop PC Range */ -#define XDM_TRAX_DELAY 0x10001C /*[0x07] Post Stop Trigger Capture Size */ -#define XDM_TRAX_STARTADDR 0x100020 /*[0x08] Trace Memory Start */ -#define XDM_TRAX_ENDADDR 0x100024 /*[0x09] Trace Memory End */ -#define XDM_TRAX_DEBUGPC 0x10003C /*[0x0F] Debug PC */ -#define XDM_TRAX_P4CHANGE 0x100040 /*[0x10] X */ -#define XDM_TRAX_TIME0 0x100040 /*[0x10] First Time Register */ -#define XDM_TRAX_P4REV 0x100044 /*[0x11] X */ -#define XDM_TRAX_TIME1 0x100044 /*[0x11] Second Time Register */ -#define XDM_TRAX_P4DATE 0x100048 /*[0x12] X */ -#define XDM_TRAX_INTTIME_MAX 0x100048 /*[0x12] maximal Value of Timestamp IntTime */ -#define XDM_TRAX_P4TIME 0x10004C /*[0x13] X */ -#define XDM_TRAX_PDSTATUS 0x100050 /*[0x14] Sample of PDebugStatus */ -#define XDM_TRAX_PDDATA 0x100054 /*[0x15] Sample of PDebugData */ -#define XDM_TRAX_STOP_PC 0x100058 /*[0x16] X */ -#define XDM_TRAX_STOP_ICNT 0x10005C /*[0x16] X */ -#define XDM_TRAX_MSG_STATUS 0x100060 /*[0x17] X */ -#define XDM_TRAX_FSM_STATUS 0x100064 /*[0x18] X */ -#define XDM_TRAX_IB_STATUS 0x100068 /*[0x19] X */ -#define XDM_TRAX_STOPCNT 0x10006C /*[0x1A] X */ - -/* Performance Monitoring Counters */ -#define XDM_PERF_PMG 0x101000 /*[0x20] perf. mon. global control register */ -#define XDM_PERF_INTPC 0x101010 /*[0x24] perf. mon. interrupt PC */ -#define XDM_PERF_PM0 0x101080 /*[0x28] perf. mon. counter 0 value */ -#define XDM_PERF_PM1 0x101084 /*[0x29] perf. mon. counter 1 value */ -#define XDM_PERF_PM2 0x101088 /*[0x2A] perf. mon. counter 2 value */ -#define XDM_PERF_PM3 0x10108C /*[0x2B] perf. mon. counter 3 value */ -#define XDM_PERF_PM4 0x101090 /*[0x2C] perf. mon. counter 4 value */ -#define XDM_PERF_PM5 0x101094 /*[0x2D] perf. mon. counter 5 value */ -#define XDM_PERF_PM6 0x101098 /*[0x2E] perf. mon. counter 6 value */ -#define XDM_PERF_PM7 0x10109C /*[0x2F] perf. mon. counter 7 value */ -#define XDM_PERF_PM(n) (0x101080+((n)<<2)) /* perfmon cnt n=0..7 value */ -#define XDM_PERF_PMCTRL0 0x101100 /*[0x30] perf. mon. counter 0 control */ -#define XDM_PERF_PMCTRL1 0x101104 /*[0x31] perf. mon. counter 1 control */ -#define XDM_PERF_PMCTRL2 0x101108 /*[0x32] perf. mon. counter 2 control */ -#define XDM_PERF_PMCTRL3 0x10110C /*[0x33] perf. mon. counter 3 control */ -#define XDM_PERF_PMCTRL4 0x101110 /*[0x34] perf. mon. counter 4 control */ -#define XDM_PERF_PMCTRL5 0x101114 /*[0x35] perf. mon. counter 5 control */ -#define XDM_PERF_PMCTRL6 0x101118 /*[0x36] perf. mon. counter 6 control */ -#define XDM_PERF_PMCTRL7 0x10111C /*[0x37] perf. mon. counter 7 control */ -#define XDM_PERF_PMCTRL(n) (0x101100+((n)<<2)) /* perfmon cnt n=0..7 control */ -#define XDM_PERF_PMSTAT0 0x101180 /*[0x38] perf. mon. counter 0 status */ -#define XDM_PERF_PMSTAT1 0x101184 /*[0x39] perf. mon. counter 1 status */ -#define XDM_PERF_PMSTAT2 0x101188 /*[0x3A] perf. mon. counter 2 status */ -#define XDM_PERF_PMSTAT3 0x10118C /*[0x3B] perf. mon. counter 3 status */ -#define XDM_PERF_PMSTAT4 0x101190 /*[0x3C] perf. mon. counter 4 status */ -#define XDM_PERF_PMSTAT5 0x101194 /*[0x3D] perf. mon. counter 5 status */ -#define XDM_PERF_PMSTAT6 0x101198 /*[0x3E] perf. mon. 
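The per-counter macros above only scale the counter index by the fixed 4-byte register stride; a compile-time self-check of that equivalence (constants mirrored from the list above, no hardware access):

#include <assert.h>

/* Mirrored from the definitions above. */
#define XDM_PERF_PM3       0x10108C
#define XDM_PERF_PM(n)     (0x101080 + ((n) << 2))
#define XDM_PERF_PMCTRL5   0x101114
#define XDM_PERF_PMCTRL(n) (0x101100 + ((n) << 2))

/* Each counter's value/control register sits 4 bytes after the previous one. */
static_assert(XDM_PERF_PM(3) == XDM_PERF_PM3, "PM stride");
static_assert(XDM_PERF_PMCTRL(5) == XDM_PERF_PMCTRL5, "PMCTRL stride");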
counter 6 status */ -#define XDM_PERF_PMSTAT7 0x10119C /*[0x3F] perf. mon. counter 7 status */ -#define XDM_PERF_PMSTAT(n) (0x101180+((n)<<2)) /* perfmon cnt n=0..7 status */ - -/* On-Chip-Debug (OCD) */ -#define XDM_OCD_ID 0x102000 /*[0x40] ID register */ -#define XDM_OCD_DCR_CLR 0x102008 /*[0x42] Debug Control reg clear */ -#define XDM_OCD_DCR_SET 0x10200C /*[0x43] Debug Control reg set */ -#define XDM_OCD_DSR 0x102010 /*[0x44] Debug Status reg */ -#define XDM_OCD_DDR 0x102014 /*[0x45] Debug Data reg */ -#define XDM_OCD_DDREXEC 0x102018 /*[0x46] Debug Data reg + execute-DIR */ -#define XDM_OCD_DIR0EXEC 0x10201C /*[0x47] Debug Instruction reg, word 0 + execute-DIR */ -#define XDM_OCD_DIR0 0x102020 /*[0x48] Debug Instruction reg, word 1 */ -#define XDM_OCD_DIR1 0x102024 /*[0x49] Debug Instruction reg, word 2 */ -#define XDM_OCD_DIR2 0x102028 /*[0x4A] Debug Instruction reg, word 3 */ -#define XDM_OCD_DIR3 0x10202C /*[0x49] Debug Instruction reg, word 4 */ -#define XDM_OCD_DIR4 0x102030 /*[0x4C] Debug Instruction reg, word 5 */ -#define XDM_OCD_DIR5 0x102034 /*[0x4D] Debug Instruction reg, word 5 */ -#define XDM_OCD_DIR6 0x102038 /*[0x4E] Debug Instruction reg, word 6 */ -#define XDM_OCD_DIR7 0x10203C /*[0x4F] Debug Instruction reg, word 7 */ - -/* Miscellaneous Registers */ -#define XDM_MISC_PWRCTL 0x103020 /*[0x58] Power and Reset Control */ -#define XDM_MISC_PWRSTAT 0x103024 /*[0x59] Power and Reset Status */ -#define XDM_MISC_ERISTAT 0x103028 /*[0x5A] ERI Transaction Status */ -#define XDM_MISC_DATETIME 0x103034 -#define XDM_MISC_CONFIGID1_V0 0x103034 /*[0x5D] [INTERNAL] ConfigID1 in XDM v0/1 */ -#define XDM_MISC_CONFIGID1_V2 0x10007c /*[0x1F] [INTERNAL] ConfigID1 since XDM v2 */ -#define XDM_MISC_CONFIGID0_V2 0x100078 /*[0x1E] [INTERNAL] ConfigID0 since XDM v2 */ -#define XDM_MISC_UBID 0x103038 /*[0x5E] [INTERNAL] Build Unique ID */ -#define XDM_MISC_CID 0x10303C /*[0x5F] [INTERNAL] Customer ID */ - -/* CoreSight compatibility */ -#define XDM_CS_ITCTRL 0x103F00 /*[0x60] InTegration Mode control reg */ -#define XDM_CS_CLAIMSET 0x103FA0 /*[0x68] Claim Tag Set reg */ -#define XDM_CS_CLAIMCLR 0x103FA4 /*[0x69] Claim Tag Clear reg */ -#define XDM_CS_LOCK_ACCESS 0x103FB0 /*[0x6B] Lock Access (writing 0xC5ACCE55 unlocks) */ -#define XDM_CS_LOCK_STATUS 0x103FB4 /*[0x6D] Lock Status */ -#define XDM_CS_AUTH_STATUS 0x103FB8 /*[0x6E] Authentication Status */ -#define XDM_CS_DEV_ID 0x103FC8 /*[0x72] Device ID */ -#define XDM_CS_DEV_TYPE 0x103FCC /*[0x73] Device Type */ -#define XDM_CS_PER_ID4 0x103FD0 /*[0x74] Peripheral ID reg byte 4 */ -#define XDM_CS_PER_ID5 0x103FD4 /*[0x75] Peripheral ID reg byte 5 */ -#define XDM_CS_PER_ID6 0x103FD8 /*[0x76] Peripheral ID reg byte 6 */ -#define XDM_CS_PER_ID7 0x103FDC /*[0x77] Peripheral ID reg byte 7 */ -#define XDM_CS_PER_ID0 0x103FE0 /*[0x78] Peripheral ID reg byte 0 */ -#define XDM_CS_PER_ID1 0x103FE4 /*[0x79] Peripheral ID reg byte 1 */ -#define XDM_CS_PER_ID2 0x103FE8 /*[0x7A] Peripheral ID reg byte 2 */ -#define XDM_CS_PER_ID3 0x103FEC /*[0x7B] Peripheral ID reg byte 3 */ -#define XDM_CS_COMP_ID0 0x103FF0 /*[0x7C] Component ID reg byte 0 */ -#define XDM_CS_COMP_ID1 0x103FF4 /*[0x7D] Component ID reg byte 1 */ -#define XDM_CS_COMP_ID2 0x103FF8 /*[0x7E] Component ID reg byte 2 */ -#define XDM_CS_COMP_ID3 0x103FFC /*[0x7F] Component ID reg byte 3 */ - -#define CS_PER_ID0 0x00000003 -#define CS_PER_ID1 0x00000021 -#define CS_PER_ID2 0x0000000f -#define CS_PER_ID3 0x00000000 -#define CS_PER_ID4 0x00000024 - -#define CS_COMP_ID0 0x0000000d -#define 
CS_COMP_ID1 0x00000090 -#define CS_COMP_ID2 0x00000005 -#define CS_COMP_ID3 0x000000b1 - -#define CS_DEV_TYPE 0x00000015 - -#define XTENSA_IDCODE 0x120034e5 // FIXME (upper bits not spec. out but BE is !) -#define XTENSA_MFC_ID (XTENSA_IDCODE & 0xFFF) -#define CS_DEV_ID XTENSA_IDCODE //FIXME - for XDM v0 only, for v2 is the new ID, that includes vars like PRID but also can be custom -#define CS_DEV_ID_v0_MASK 0x00000FFF // can compare only the lower 12 bits -#define CS_DEV_ID_v2_MASK 0xF0000000 // can compare only the upper 4 bits - -#define NXS_OCD_REG(val) ((val >= 0x40) && (val <= 0x5F)) -#define NXS_TRAX_REG(val) val <= 0x3F - -#define ERI_TRAX_REG(val) ((val & 0xFFFF) < 0x1000) -#define ERI_OCD_REG(val) ((val & 0xFFFF) >= 0x2000) && ((val & 0xFFFF) < 0x4000)) - -/* Convert above 14-bit ERI/APB address/offset to 7-bit NAR address: */ -#define _XDM_ERI_TO_NAR(a) ( ((a)&0x3F80)==0x0000 ? (((a)>>2) & 0x1F) \ - : ((a)&0x3E00)==0x1000 ? (0x20 | (((a)>>2) & 7) | (((a)>>4) & 0x18)) \ - : ((a)&0x3FC0)==0x2000 ? (0x40 | (((a)>>2) & 0xF)) \ - : ((a)&0x3FE0)==0x3020 ? (0x50 | (((a)>>2) & 0xF)) \ - : ((a)&0x3FFC)==0x3F00 ? 0x60 \ - : ((a)&0x3F80)==0x3F80 ? (0x60 | (((a)>>2) & 0x1F)) \ - : -1 ) - -#define XDM_ERI_TO_NAR(a) _XDM_ERI_TO_NAR(a & 0xFFFF) - -/* Convert 7-bit NAR address back to ERI/APB address/offset: */ -#define _XDM_NAR_TO_APB(a) ((a) <= 0x1f ? ((a) << 2) \ - :(a) >= 0x20 && (a) <= 0x3F ? (0x1000 | (((a)& 7) << 2) | (((a)&0x18)<<4)) \ - :(a) >= 0x40 && (a) <= 0x4F ? (0x2000 | (((a)&0xF) << 2)) \ - :(a) >= 0x58 && (a) <= 0x5F ? (0x3000 | (((a)&0xF) << 2)) \ - :(a) == 0x60 ? (0x3F00) \ - :(a) >= 0x68 && (a) <= 0x7F ? (0x3F80 | (((a)&0x1F) << 2)) \ - : -1) - -#define XDM_NAR_TO_APB(a) _XDM_NAR_TO_APB((a & 0xFFFF)) -#define XDM_NAR_TO_ERI(a) _XDM_NAR_TO_APB((a & 0xFFFF)) | 0x100000 - -/* Convert APB to ERI address */ -#define XDM_APB_TO_ERI(a) ((a) | (0x100000)) -#define XDM_ERI_TO_APB(a) ((a) & (0x0FFFFF)) - -/*********** Bit definitions within some of the above registers ***********/ -#define OCD_ID_LSDDRP 0x01000000 -#define OCD_ID_LSDDRP_SHIFT 24 -#define OCD_ID_ENDIANESS 0x00000001 -#define OCD_ID_ENDIANESS_SHIFT 0 -#define OCD_ID_PSO 0x0000000C -#define OCD_ID_PSO_SHIFT 2 -#define OCD_ID_TRACEPORT 0x00000080 -#define OCD_ID_TRACEPORT_SHIFT 7 - -#define OCD_ID_LSDDRP_XEA3 0x00000400 - -/* Power Status register. NOTE: different bit positions in JTAG vs. ERI/APB !! */ -/* ERI/APB: */ -#define PWRSTAT_CORE_DOMAIN_ON 0x00000001 /* set if core is powered on */ -#define PWRSTAT_CORE_DOMAIN_ON_SHIFT 0 -#define PWRSTAT_WAKEUP_RESET 0x00000002 /* [ERI only] 0=cold start, 1=PSO wakeup */ -#define PWRSTAT_WAKEUP_RESET_SHIFT 1 -#define PWRSTAT_CACHES_LOST_POWER 0x00000004 /* [ERI only] set if caches (/localmems?) lost power */ - /* FIXME: does this include local memories? 
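To make the dual address map above concrete: the same TRAX Control register sits at ERI/APB offset 0x100004 but at NAR address 0x01 over JTAG. A sketch, assuming the header above is still reachable as <xtensa/xdm-regs.h>:

#include <stdio.h>
#include <xtensa/xdm-regs.h>	/* assumed include path for the definitions above */

int main(void)
{
	/* ERI 0x100004 -> NAR 0x01, matching the "[0x01] Control" comment. */
	printf("NAR(XDM_TRAX_CONTROL) = 0x%02x\n",
	       (unsigned)XDM_ERI_TO_NAR(XDM_TRAX_CONTROL));

	/* ...and back from the 7-bit NAR address to the ERI address. */
	printf("ERI(0x01)             = 0x%06x\n",
	       (unsigned)XDM_NAR_TO_ERI(0x01));
	return 0;
}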
*/ -#define PWRSTAT_CACHES_LOST_POWER_SHIFT 2 -#define PWRSTAT_CORE_STILL_NEEDED 0x00000010 /* set if others keeping core awake */ -#define PWRSTAT_CORE_STILL_NEEDED_SHIFT 4 -#define PWRSTAT_MEM_DOMAIN_ON 0x00000100 /* set if memory domain is powered on */ -#define PWRSTAT_MEM_DOMAIN_ON_SHIFT 8 -#define PWRSTAT_DEBUG_DOMAIN_ON 0x00001000 /* set if debug domain is powered on */ -#define PWRSTAT_DEBUG_DOMAIN_ON_SHIFT 12 -#define PWRSTAT_ALL_ON (PWRSTAT_CORE_DOMAIN_ON | PWRSTAT_MEM_DOMAIN_ON | PWRSTAT_DEBUG_DOMAIN_ON) -#define PWRSTAT_CORE_WAS_RESET 0x00010000 /* [APB only] set if core got reset */ -#define PWRSTAT_CORE_WAS_RESET_SHIFT 16 -#define PWRSTAT_DEBUG_WAS_RESET 0x10000000 /* set if debug module got reset */ -#define PWRSTAT_DEBUG_WAS_RESET_SHIFT 28 -/* JTAG: */ -#define J_PWRSTAT_CORE_DOMAIN_ON 0x01 /* set if core is powered on */ -#define J_PWRSTAT_MEM_DOMAIN_ON 0x02 /* set if memory domain is powered on */ -#define J_PWRSTAT_DEBUG_DOMAIN_ON 0x04 /* set if debug domain is powered on */ -#define J_PWRSTAT_ALL_ON (J_PWRSTAT_CORE_DOMAIN_ON | J_PWRSTAT_MEM_DOMAIN_ON | J_PWRSTAT_DEBUG_DOMAIN_ON) -#define J_PWRSTAT_CORE_STILL_NEEDED 0x08 /* set if others keeping core awake */ -#define J_PWRSTAT_CORE_WAS_RESET 0x10 /* set if core got reset */ -#define J_PWRSTAT_DEBUG_WAS_RESET 0x40 /* set if debug module got reset */ - -/* Power Control register. NOTE: different bit positions in JTAG vs. ERI/APB !! */ -/* ERI/APB: */ -#define PWRCTL_CORE_SHUTOFF 0x00000001 /* [ERI only] core wants to shut off on WAITI */ -#define PWRCTL_CORE_SHUTOFF_SHIFT 0 -#define PWRCTL_CORE_WAKEUP 0x00000001 /* [APB only] set to force core to stay powered on */ -#define PWRCTL_CORE_WAKEUP_SHIFT 0 -#define PWRCTL_MEM_WAKEUP 0x00000100 /* set to force memory domain to stay powered on */ -#define PWRCTL_MEM_WAKEUP_SHIFT 8 -#define PWRCTL_DEBUG_WAKEUP 0x00001000 /* set to force debug domain to stay powered on */ -#define PWRCTL_DEBUG_WAKEUP_SHIFT 12 -#define PWRCTL_ALL_ON (PWRCTL_CORE_WAKEUP | PWRCTL_MEM_WAKEUP | PWRCTL_DEBUG_WAKEUP) -#define PWRCTL_CORE_RESET 0x00010000 /* [APB only] set to assert core reset */ -#define PWRCTL_CORE_RESET_SHIFT 16 -#define PWRCTL_DEBUG_RESET 0x10000000 /* set to assert debug module reset */ -#define PWRCTL_DEBUG_RESET_SHIFT 28 -/* JTAG: */ -#define J_PWRCTL_CORE_WAKEUP 0x01 /* set to force core to stay powered on */ -#define J_PWRCTL_MEM_WAKEUP 0x02 /* set to force memory domain to stay powered on */ -#define J_PWRCTL_DEBUG_WAKEUP 0x04 /* set to force debug domain to stay powered on */ -#define J_DEBUG_USE 0x80 /* */ -#define J_PWRCTL_ALL_ON (J_DEBUG_USE | J_PWRCTL_CORE_WAKEUP | J_PWRCTL_MEM_WAKEUP | J_PWRCTL_DEBUG_WAKEUP) -#define J_PWRCTL_DEBUG_ON J_DEBUG_USE | J_PWRCTL_DEBUG_WAKEUP -#define J_PWRCTL_CORE_RESET 0x10 /* set to assert core reset */ -#define J_PWRCTL_DEBUG_RESET 0x40 /* set to assert debug module reset */ - -#define J_PWRCTL_WRITE_MASK 0xFF -#define J_PWRSTAT_WRITE_MASK 0xFF - -#define PWRCTL_WRITE_MASK ~0 -#define PWRSTAT_WRITE_MASK ~0 - -/************ The following are only relevant for JTAG, so perhaps belong in OCD only **************/ - -/* XDM 5-bit JTAG Instruction Register (IR) values: */ -#define XDM_IR_PWRCTL 0x08 /* select 8-bit Power/Reset Control (PRC) */ -#define XDM_IR_PWRSTAT 0x09 /* select 8-bit Power/Reset Status (PRS) */ -#define XDM_IR_NAR_SEL 0x1c /* select altern. 8-bit NAR / 32-bit NDR (Nexus-style) */ -#define XDM_IR_NDR_SEL 0x1d /* select altern. 
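Because a probe normally has to wake the debug domain before any other XDM access works, the usual first step is a PWRCTL/PWRSTAT handshake through the JTAG view of these bits. A hedged sketch; jtag_ir_scan() and jtag_dr_scan8() are hypothetical probe primitives, not part of this header:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical probe primitives supplied by the debug-transport layer. */
extern void    jtag_ir_scan(uint8_t ir, unsigned ir_width);
extern uint8_t jtag_dr_scan8(uint8_t out);

/* Mirrored from the definitions above (and XDM_IR_WIDTH = 5 further down). */
#define XDM_IR_PWRCTL             0x08
#define XDM_IR_PWRSTAT            0x09
#define XDM_IR_WIDTH              5
#define J_PWRCTL_ALL_ON           0x87	/* J_DEBUG_USE | core | mem | debug wakeup */
#define J_PWRSTAT_DEBUG_DOMAIN_ON 0x04

/* Request all power domains on, then poll until the debug domain reports up. */
static bool xdm_power_up(void)
{
	jtag_ir_scan(XDM_IR_PWRCTL, XDM_IR_WIDTH);
	jtag_dr_scan8(J_PWRCTL_ALL_ON);

	jtag_ir_scan(XDM_IR_PWRSTAT, XDM_IR_WIDTH);
	for (int i = 0; i < 1000; i++)
		if (jtag_dr_scan8(0) & J_PWRSTAT_DEBUG_DOMAIN_ON)
			return true;
	return false;
}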
32-bit NDR / 8-bit NAR - (FIXME - functionality not yet in HW) */ -#define XDM_IR_IDCODE 0x1e /* select 32-bit JTAG IDCODE */ -#define XDM_IR_BYPASS 0x1f /* select 1-bit bypass */ - -#define XDM_IR_WIDTH 5 /* width of IR for Xtensa TAP */ - -/* NAR register bits: */ -#define XDM_NAR_WRITE 0x01 -#define XDM_NAR_ADDR_MASK 0xFE -#define XDM_NAR_ADDR_SHIFT 1 - -#define XDM_NAR_BUSY 0x02 -#define XDM_NAR_ERROR 0x01 - -#define NEXUS_DIR_READ 0x00 -#define NEXUS_DIR_WRITE 0x01 - -/************ Define DCR register bits **************/ - -#define DCR_ENABLEOCD 0x0000001 -#define DCR_ENABLEOCD_SHIFT 0 -#define DCR_DEBUG_INT 0x0000002 -#define DCR_DEBUG_INT_SHIFT 1 -#define DCR_DEBUG_OVERRIDE 0x0000004 -#define DCR_DEBUG_OVERRIDE_SHIFT 2 -#define DCR_DEBUG_SS_REQ 0x0000008 -#define DCR_DEBUG_SS_REQ_SHIFT 3 -#define DCR_MASK_NMI 0x0000020 -#define DCR_MASK_NMI_SHIFT 5 -#define DCR_STEP_ENABLE 0x0000040 -#define DCR_STEP_ENABLE_SHIFT 6 -#define DCR_BREAK_IN_EN 0x0010000 -#define DCR_BREAK_IN_EN_SHIFT 16 -#define DCR_BREAK_OUT_EN 0x0020000 -#define DCR_BREAK_OUT_EN_SHIFT 17 -#define DCR_DEBUG_INT_EN 0x0040000 -#define DCR_DEBUG_INT_EN_SHIFT 18 -#define DCR_DBG_SW_ACTIVE 0x0100000 -#define DCR_DBG_SW_ACTIVE_SHIFT 20 -#define DCR_STALL_IN_EN 0x0200000 -#define DCR_STALL_IN_EN_SHIFT 21 -#define DCR_DEBUG_OUT_EN 0x0400000 -#define DCR_DEBUG_OUT_EN_SHIFT 22 -#define DCR_BREAK_OUT_ITO 0x1000000 -#define DCR_STALL_OUT_ITO 0x2000000 -#define DCR_STALL_OUT_ITO_SHIFT 25 - -/************ Define DSR register bits **************/ - -#define DOSR_STOP_CAUSE_SHIFT 5 -#define DOSR_STOP_CAUSE_MASK 0xF - -#define DOSR_EXECDONE_SHIFT 0 -#define DOSR_EXECDONE_ER 0x01 -#define DOSR_EXECDONE_SHIFT 0 -#define DOSR_EXCEPTION_ER 0x02 -#define DOSR_EXCEPTION_SHIFT 1 -#define DOSR_BUSY 0x04 -#define DOSR_BUSY_SHIFT 2 -#define DOSR_OVERRUN 0x08 -#define DOSR_OVERRUN_SHIFT 3 -#define DOSR_INOCDMODE_ER 0x10 -#define DOSR_INOCDMODE_SHIFT 4 -#define DOSR_CORE_WROTE_DDR_ER 0x400 -#define DOSR_CORE_WROTE_DDR_SHIFT 10 -#define DOSR_CORE_READ_DDR_ER 0x800 -#define DOSR_CORE_READ_DDR_SHIFT 11 -#define DOSR_HOST_WROTE_DDR_ER 0x4000 -#define DOSR_HOST_WROTE_DDR_SHIFT 14 -#define DOSR_HOST_READ_DDR_ER 0x8000 -#define DOSR_HOST_READ_DDR_SHIFT 15 - -#define DOSR_DEBUG_PEND_BIN 0x10000 -#define DOSR_DEBUG_PEND_HOST 0x20000 -#define DOSR_DEBUG_PEND_TRAX 0x40000 -#define DOSR_DEBUG_BIN 0x100000 -#define DOSR_DEBUG_HOST 0x200000 -#define DOSR_DEBUG_TRAX 0x400000 -#define DOSR_DEBUG_PEND_BIN_SHIFT 16 -#define DOSR_DEBUG_PEND_HOST_SHIFT 17 -#define DOSR_DEBUG_PEND_TRAX_SHIFT 18 -#define DOSR_DEBUG_BREAKIN 0x0100000 -#define DOSR_DEBUG_BREAKIN_SHIFT 20 -#define DOSR_DEBUG_HOST_SHIFT 21 -#define DOSR_DEBUG_TRAX_SHIFT 22 - -#define DOSR_DEBUG_STALL 0x1000000 -#define DOSR_DEBUG_STALL_SHIFT 24 - -#define DOSR_CORE_ON 0x40000000 -#define DOSR_CORE_ON_SHIFT 30 -#define DOSR_DEBUG_ON 0x80000000 -#define DOSR_DEBUG_ON_SHIFT 31 - -/********** Performance monitor registers bits **********/ - -#define PERF_PMG_ENABLE 0x00000001 /* global enable bit */ -#define PERF_PMG_ENABLE_SHIFT 0 - -#define PERF_PMCTRL_INT_ENABLE 0x00000001 /* assert interrupt on overflow */ -#define PERF_PMCTRL_INT_ENABLE_SHIFT 0 -#define PERF_PMCTRL_KRNLCNT 0x00000008 /* ignore TRACELEVEL */ -#define PERF_PMCTRL_KRNLCNT_SHIFT 3 -#define PERF_PMCTRL_TRACELEVEL 0x000000F0 /* count when CINTLEVEL <= TRACELEVEL */ -#define PERF_PMCTRL_TRACELEVEL_SHIFT 4 -#define PERF_PMCTRL_SELECT 0x00001F00 /* events group selector */ -#define PERF_PMCTRL_SELECT_SHIFT 8 -#define PERF_PMCTRL_MASK 0xFFFF0000 
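A sketch of composing a per-counter control word from the PMCTRL fields above; the event-group and trace-level numbers are caller-chosen and purely illustrative:

#include <stdint.h>

/* Mirrored from the field definitions above. */
#define PERF_PMCTRL_INT_ENABLE       0x00000001
#define PERF_PMCTRL_TRACELEVEL_SHIFT 4
#define PERF_PMCTRL_SELECT_SHIFT     8

/* Illustrative only: value to program into a PMCTRLn register so the counter
 * counts while CINTLEVEL <= trace_level, selects the given event group, and
 * raises an interrupt on overflow.  The global enable bit (PERF_PMG_ENABLE in
 * XDM_PERF_PMG) still has to be set separately. */
static uint32_t pmctrl_value(unsigned event_group, unsigned trace_level)
{
	return PERF_PMCTRL_INT_ENABLE |
	       ((uint32_t)trace_level << PERF_PMCTRL_TRACELEVEL_SHIFT) |
	       ((uint32_t)event_group << PERF_PMCTRL_SELECT_SHIFT);
}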
/* events mask */ -#define PERF_PMCTRL_MASK_SHIFT 16 - -#define PERF_PMSTAT_OVERFLOW 0x00000001 /* counter overflowed */ -#define PERF_PMSTAT_OVERFLOW_SHIFT 0 -#define PERF_PMSTAT_INT 0x00000010 /* interrupt asserted */ -#define PERF_PMSTAT_INT_SHIFT 4 - -#if defined (USE_XDM_REGNAME) || defined (USE_DAP_REGNAME) -/* Describes XDM register: */ -typedef struct { - int reg; - char* name; -} regdef_t; - -/* - * Returns the name of the specified XDM register number, - * or simply "???" if the register number is not recognized. - * FIXME - requires -1 as the last entry - change to compare the name to ??? - * or even better, make the code above to work. - */ -static char* -regname(regdef_t* list, int reg) -{ - int i = 0; - while (list[i].reg != -1) { - if (list[i].reg == reg) - break; - i++; - } - return list[i].name; -} - -#if defined (USE_XDM_REGNAME) -static regdef_t xdm_reglist[] = -{ - {XDM_OCD_DSR ,"DOSR" }, - {XDM_OCD_DDR ,"DDR" }, - {XDM_OCD_DDREXEC ,"DDREXEC" }, - {XDM_OCD_DIR0EXEC ,"DIR0EXEC"}, - {XDM_OCD_DCR_CLR ,"DCR_CLR" }, - {XDM_OCD_DCR_SET ,"DCR_SET" }, - {XDM_TRAX_CONTROL ,"CONTROL" }, - {XDM_TRAX_STATUS ,"STATUS" }, - {XDM_TRAX_DATA ,"DATA" }, - {XDM_TRAX_ADDRESS ,"ADDRESS" }, - - {XDM_TRAX_ID ,"TRAX_ID" }, - - {XDM_TRAX_TRIGGER ,"TRIGGER PC" }, - {XDM_TRAX_MATCH ,"PC MATCH" }, - {XDM_TRAX_DELAY ,"DELAY CNT." }, - {XDM_TRAX_STARTADDR ,"START ADDRESS"}, - {XDM_TRAX_ENDADDR ,"END ADDRESS" }, - {XDM_TRAX_DEBUGPC ,"DEBUG PC" }, - {XDM_TRAX_P4CHANGE ,"P4 CHANGE" }, - {XDM_TRAX_P4REV ,"P4 REV." }, - {XDM_TRAX_P4DATE ,"P4 DATE" }, - {XDM_TRAX_P4TIME ,"P4 TIME" }, - {XDM_TRAX_PDSTATUS ,"PD STATUS" }, - {XDM_TRAX_PDDATA ,"PD DATA" }, - {XDM_TRAX_STOP_PC ,"STOP PC" }, - {XDM_TRAX_STOP_ICNT ,"STOP ICNT" }, - {XDM_TRAX_MSG_STATUS,"MSG STAT." }, - {XDM_TRAX_FSM_STATUS,"FSM STAT." }, - {XDM_TRAX_IB_STATUS ,"IB STAT." 
}, - - {XDM_OCD_ID ,"OCD_ID" }, - {XDM_OCD_DIR0 ,"DIR0" }, - {XDM_OCD_DIR1 ,"DIR1" }, - {XDM_OCD_DIR2 ,"DIR2" }, - {XDM_OCD_DIR3 ,"DIR3" }, - {XDM_OCD_DIR4 ,"DIR4" }, - {XDM_OCD_DIR5 ,"DIR5" }, - {XDM_OCD_DIR6 ,"DIR6" }, - {XDM_OCD_DIR7 ,"DIR7" }, - - {XDM_PERF_PMG ,"PMG" }, - {XDM_PERF_INTPC ,"INTPC" }, - {XDM_PERF_PM0 ,"PM0 " }, - {XDM_PERF_PM1 ,"PM1 " }, - {XDM_PERF_PM2 ,"PM2 " }, - {XDM_PERF_PM3 ,"PM3 " }, - {XDM_PERF_PM4 ,"PM4 " }, - {XDM_PERF_PM5 ,"PM5 " }, - {XDM_PERF_PM6 ,"PM6 " }, - {XDM_PERF_PM7 ,"PM7 " }, - {XDM_PERF_PMCTRL0 ,"PMCTRL0"}, - {XDM_PERF_PMCTRL1 ,"PMCTRL1"}, - {XDM_PERF_PMCTRL2 ,"PMCTRL2"}, - {XDM_PERF_PMCTRL3 ,"PMCTRL3"}, - {XDM_PERF_PMCTRL4 ,"PMCTRL4"}, - {XDM_PERF_PMCTRL5 ,"PMCTRL5"}, - {XDM_PERF_PMCTRL6 ,"PMCTRL6"}, - {XDM_PERF_PMCTRL7 ,"PMCTRL7"}, - {XDM_PERF_PMSTAT0 ,"PMSTAT0"}, - {XDM_PERF_PMSTAT1 ,"PMSTAT1"}, - {XDM_PERF_PMSTAT2 ,"PMSTAT2"}, - {XDM_PERF_PMSTAT3 ,"PMSTAT3"}, - {XDM_PERF_PMSTAT4 ,"PMSTAT4"}, - {XDM_PERF_PMSTAT5 ,"PMSTAT5"}, - {XDM_PERF_PMSTAT6 ,"PMSTAT6"}, - {XDM_PERF_PMSTAT7 ,"PMSTAT7"}, - - {XDM_MISC_PWRCTL ,"PWRCTL" }, - {XDM_MISC_PWRSTAT ,"PWRSTAT" }, - {XDM_MISC_ERISTAT ,"ERISTAT" }, - {XDM_MISC_DATETIME ,"DATETIME"}, - {XDM_MISC_UBID ,"UBID" }, - {XDM_MISC_CID ,"CID" }, - - {XDM_CS_ITCTRL ,"ITCTRL" }, - {XDM_CS_CLAIMSET ,"CLAIMSET" }, - {XDM_CS_CLAIMCLR ,"CLAIMCLR" }, - {XDM_CS_LOCK_ACCESS ,"LOCK_ACCESS"}, - {XDM_CS_LOCK_STATUS ,"LOCK_STATUS"}, - {XDM_CS_AUTH_STATUS ,"AUTH_STATUS"}, - {XDM_CS_DEV_ID ,"DEV_ID" }, - {XDM_CS_DEV_TYPE ,"DEV_TYPE" }, - {XDM_CS_PER_ID4 ,"PER_ID4" }, - {XDM_CS_PER_ID5 ,"PER_ID5" }, - {XDM_CS_PER_ID6 ,"PER_ID6" }, - {XDM_CS_PER_ID7 ,"PER_ID7" }, - {XDM_CS_PER_ID0 ,"PER_ID0" }, - {XDM_CS_PER_ID1 ,"PER_ID1" }, - {XDM_CS_PER_ID2 ,"PER_ID2" }, - {XDM_CS_PER_ID3 ,"PER_ID3" }, - {XDM_CS_COMP_ID0 ,"COMP_ID0" }, - {XDM_CS_COMP_ID1 ,"COMP_ID1" }, - {XDM_CS_COMP_ID2 ,"COMP_ID2" }, - {XDM_CS_COMP_ID3 ,"COMP_ID3" }, - {-1 ,"???" }, -}; -#endif - -#endif - -#endif /* _XDM_REGS_H_ */ diff --git a/src/arch/xtensa/include/xtensa/xmon.h b/src/arch/xtensa/include/xtensa/xmon.h deleted file mode 100644 index 34c90dc92902..000000000000 --- a/src/arch/xtensa/include/xtensa/xmon.h +++ /dev/null @@ -1,156 +0,0 @@ -/* xmon.h - XMON definitions - * - * $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xmon/xmon.h#1 $ - * - * Copyright (c) 2001-2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#ifndef __H_XMON -#define __H_XMON - -#ifndef UCHAR -# define UCHAR unsigned char -#endif - -#ifndef C_UCHAR -# define C_UCHAR const unsigned char -#endif - -#ifndef UINT32 -# define UINT32 unsigned int -#endif - -/* Default GDB packet size */ -#define GDB_PKT_SIZE 4096 - -/*XMON signals */ -#define XMON_SIGINT 2 /*target was interrupted */ -#define XMON_SIGILL 4 /*illegal instruction */ -#define XMON_SIGTRAP 5 /*general exception */ -#define XMON_SIGSEGV 11 /*page faults */ - - -/* Type of log message from XMON to the application */ -typedef enum { - XMON_LOG, - XMON_TRACE, - XMON_ERR, -} xmon_log_t; - - -#ifdef _cplusplus -extern "C" { -#endif - -/* - * THE FOLLOWING ROUTINES ARE USED BY THE USER - */ - -/** - * Initialize XMON so GDB can attach. - * gdbBuf - pointer to a buffer XMON uses to comm. with GDB - * gdbPktSize - Size of the allocated buffer for GDB communication. - * xlog - log handler for XMON produced errors/logs/traces - - */ -extern int -_xmon_init(char* gdbBuf, int gdbPktSize, - void(*xlog)(xmon_log_t type, const char* str)); - -/** - * Detach from XMON. Can execute at any time - */ -extern void -_xmon_close(void); - -/** - * Print message to GDB - */ -extern void -_xmon_consoleString(const char* str); - -/** - * XMON version - */ -extern const char* -_xmon_version(); - -/** - * Enable disable various logging and tracing chains - * app_log_en - enable/disable logging to the app log handler. - * ENABLED BY DEFAULT. - * app_trace_en - enable/disable tracing to the app log handler. - * DISABLED BY DEFAULT. - * gdb_log_en - enable/disable log notifications to the GDB. - * ENABLED BY DEFAULT. - * gdb_trace_en - enable/disable tracing notifications to the GDB. - * DISABLED BY DEFAULT. - */ -extern void -_xmon_log(char app_log_en, char app_trace_en, - char gdb_log_en, char gdb_trace_en); - -//extern int -//_xmon_process_packet (int len, char* buf); - -//extern int -//_xmon_process_packet2 (void); - -/* - * THE FOLLOWING ROUTINES NEED TO BE PROVIDED BY USER - */ - -/* - * Receive remote packet bytes from GDB - * wait: If the function would block waiting for more - * characters from gdb, wait=0 instructs it to - * return 0 immediatelly. Otherwise, if wait=1, - * the function may or may not wait for GDB. - * NOTE: Current XMON version supports single char - * input only (return value is 1 always) - * buf: Pointer to the buffer for the received data. - * Returns: 0 - no data avaiable, - >0 - length of received array in buf. - */ -extern int -_xmon_in(int wait, UCHAR* buf); - -/* - * Output an array of chars to GDB - * len - number of chars in the array - */ -extern void -_xmon_out(int len, UCHAR*); - -/* - * Flush output characthers - * XMON invokes this one when a full response is ready - */ -extern int -_xmon_flush(void); // flush output characters - -#ifdef _cplusplus -} -#endif - - -#endif diff --git a/src/arch/xtensa/include/xtensa/xtbsp.h b/src/arch/xtensa/include/xtensa/xtbsp.h deleted file mode 100644 index 0781e4154f4f..000000000000 --- a/src/arch/xtensa/include/xtensa/xtbsp.h +++ /dev/null @@ -1,287 +0,0 @@ -/******************************************************************************* - - Copyright (c) 2006-2009 Tensilica Inc. 
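A sketch of the glue an application supplies to XMON per the contract above: the three I/O hooks plus the init call. The uart_* helpers are hypothetical stand-ins for the board's console, and <xtensa/xmon.h> is assumed to still be on the include path:

#include <xtensa/xmon.h>

/* Hypothetical polled-UART helpers provided by the board support code. */
extern int  uart_rx_ready(void);
extern char uart_getc(void);
extern void uart_putc(char c);

static char gdb_buf[GDB_PKT_SIZE];

static void xmon_logger(xmon_log_t type, const char *str)
{
	(void)type;
	(void)str;	/* route to a console or log buffer if desired */
}

/* I/O hooks required by XMON; current XMON reads one character at a time. */
int _xmon_in(int wait, UCHAR *buf)
{
	if (!wait && !uart_rx_ready())
		return 0;
	buf[0] = (UCHAR)uart_getc();
	return 1;
}

void _xmon_out(int len, UCHAR *buf)
{
	while (len-- > 0)
		uart_putc((char)*buf++);
}

int _xmon_flush(void)
{
	return 0;	/* nothing is buffered in this sketch */
}

void debug_monitor_start(void)
{
	_xmon_init(gdb_buf, sizeof(gdb_buf), xmon_logger);
}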
- - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -xtbsp.h Xtensa Board Support Package API - -This API defines a minimal set of board-support functions that every supported -Xtensa board is expected to provide in the board-support-package (BSP) library -associated with the board-specific LSP. Only basic board functions are provided -in this board-independent API. API functions not applicable to a board must be -stubbed in its BSP library. More complex operations must use a board-specific -interface. Functions are grouped by type of peripheral device. - -*******************************************************************************/ - -#ifndef _XTBSP_H_ -#define _XTBSP_H_ - - -#ifdef __cplusplus -extern "C" { -#endif - - -/******************************************************************************* -BOARD INITIALIZATION. -The board with all its devices is initialized by xtbsp_board_init(). -Individual devices represented by this API can be reinitialized at any -time by calling their inidividual device init functions (grouped with -other device functions). This might be useful to (say) change the baud -rate of the UART. -*/ - - -/* -Initialize the board. Must call before any other API function. -Iniitializes BSP, board in general, and all devices on the board. -*/ -extern void xtbsp_board_init(void); - - -/******************************************************************************* -BOARD CHARACTERISTICS and CONVENIENCE FUNCTIONS. -Board support functions not associated with a particular peripheral device. -*/ - -/* -Return a short string representing the type of board. -If the board has a display, the string must fit on a single line. -*/ -extern const char * xtbsp_board_name(void); - -/* -Hardware reset the entire board (if possible). Does not return if successful. -If this function returns, it is stubbed out or not possible with this board. -*/ -extern void xtbsp_board_reset(void); - -/* -Return the clock frequency in Hertz. May be constant or computed. -*/ -extern unsigned xtbsp_clock_freq_hz(void); - -/* -Return the clock period in picoseconds. May be constant or computed. -*/ -extern unsigned xtbsp_clock_period_ps(void); - -/* -Spin (at least) a number of cycles per the processor's CCOUNT register. -Unlike a s/w delay loop, the duration is not affected by compiler -optimization or interrupts completed within the delay period. 
-If the processor doesn't have CCOUNT, a s/w delay loop is used to obtain -a rough approximation of the cycle count. -*/ -extern void xtbsp_delay_cycles(unsigned cycles); - -/* -Spin at least a number of nanoseconds (approximate and err in the high side). -BSP implementation should do this efficiently, avoiding integer overflow or -excessive loss of precision, run-time division or floating point. -Function implementation (vs. macro) allows BSP to optimize for the clock -frequency by pre-computing (or using constant) scale factors. -*/ -extern void xtbsp_delay_ns(unsigned ns); - - -/******************************************************************************* -C LIBRARY SUPPORT. -These functions are called by the C library libgloss interface. -Their names are predetermined apart from this BSP API. -*/ - -/* -Initialize the board. Called by C library initialization code. -Usually simply calls xtbsp_board_init(). -*/ -extern void board_init(void); - -/* -(Wait for and) Input a single byte from the default character I/O -device. Return -1 if there is no input device. -This device is usually a UART and this function calls xtbsp_uart_getchar(). -On some boards (eg.) it might be a directly connected keyboard. -*/ -extern int inbyte(void); - -/* -Output a single char to the default character I/O device (and wait -until it's been taken). -This device is usually a UART and this function calls xtbsp_uart_putchar(). -On some boards (eg.) it might be a directly connected bit-mapped screen. -*/ -extern void outbyte(int c); - - -/******************************************************************************* -UART (SERIAL I/O). -Supports a single UART in a simple polling mode and provides control of -receiver and transmitter data interrupts (client must provide handler). -Provides a mapping to processor interrupt number which can be used with -the HAL to control processor interrupt enable (INTENABLE) etc. -*/ - -/* Bitmasks to identify UART interrupts. */ -typedef enum xtbsp_uart_int { - xtbsp_uart_int_rx = 1<<0, - xtbsp_uart_int_tx = 1<<1, - /* mask of all valid interrupt bits */ - xtbsp_uart_int_all = (1<<2)-1 -} xtbsp_uart_int; - -/* -Return non-zero if the board has a UART. -*/ -extern int xtbsp_uart_exists(void); - -/* -Initialize the UART: - parity = 0 (none), 1 (odd), or 2 (even). - nstop = 1 or 2 (stop bits). - ndata = 7 or 8 (data bits). -Disables all UART interrupts. -Returns non-zero if failed (perhaps due to unsupported parameter values). -Must call before any of the following functions. -*/ -extern int xtbsp_uart_init(unsigned baud, unsigned ndata, - unsigned parity, unsigned nstop); -#define xtbsp_uart_init_default() xtbsp_uart_init(38400, 8, 0, 1) - -/* - Extended init routine for 'portable' UART. Allows specifying the UART - base address and interrupt number (if interrupt operation desired). - Meant for use on platforms other than the FPGA boards (e.g. XTSC sim - or Palladium). - - NOTE: Using the 'portable' UART may require that you implement function - xtbsp_clock_freq_hz() if your target platform does not provide it. - We recommend you implement it as a weak function so that it can be - overridden by the target platform implementation if available. - */ -extern int xtbsp_uart_init_ex(unsigned uart_baseaddr, - unsigned uart_intnum, - unsigned baud, - unsigned ndata, - unsigned parity, - unsigned nstop); - -/* -(Wait for and) Input a single char from the UART. -Any pending xtbsp_uart_int_rx interrupt is cleared. 
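For the timing helpers above, two equivalent one-millisecond delays, one scaled from the BSP clock rate in cycles and one through the nanosecond interface (assumes <xtensa/xtbsp.h> for the declarations):

#include <xtensa/xtbsp.h>

/* Spin ~1 ms in CPU cycles, scaled from the board clock rate. */
static void delay_1ms_cycles(void)
{
	xtbsp_delay_cycles(xtbsp_clock_freq_hz() / 1000u);
}

/* The same delay expressed in nanoseconds. */
static void delay_1ms_ns(void)
{
	xtbsp_delay_ns(1000000u);
}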
-*/ -extern char xtbsp_uart_getchar(void); - -/* -(Wait for transmitter ready and) Output a single char to the UART. -Any pending xtbsp_uart_int_tx interrupt is cleared. -*/ -extern void xtbsp_uart_putchar(const char c); - -/* -Return true (non-zero) if a character has been received and is ready -to be input by xtbsp_uart_getchar() without waiting, else return 0. -*/ -extern int xtbsp_uart_get_isready(void); - -/* -Return non-zero if a character may be output by xtbsp_uart_putchar() -without waiting, else return 0. -Any pending xtbsp_uart_int_tx interrupt is cleared. -*/ -extern int xtbsp_uart_put_isready(void); - -/* -Return the enable status of all UART interrupts represented by this API, -that is those with bits defined in type xtbsp_uart_int (1 bit = enabled). -This is the enable status at the device, not the processor's INTENABLE. -*/ -extern xtbsp_uart_int xtbsp_uart_int_enable_status(void); - -/* -Enable selected UART interrupts at the device. -*/ -extern void xtbsp_uart_int_enable(const xtbsp_uart_int mask); - -/* -Disable selected UART interrupts at the device. -*/ -extern void xtbsp_uart_int_disable(const xtbsp_uart_int mask); - -/* -Return the interrupt number (0..31) to which the selected UART interrupt -is connected. May be used with the link-time HAL to obtain more information, -eg. Xthal_intlevel_mask[xtbsp_uart_int_number(xtbsp_uart_int_rx)] -This information can be used to control the processor's INTENABLE, etc. -Result is -1 if not connected, undefined if mask has more than 1 bit set. -*/ -extern int xtbsp_uart_int_number(const xtbsp_uart_int mask); - - -/******************************************************************************* -DISPLAY. -Supports a single display that can render a series of ASCII characters. -Functions are provided to perform generic display tasks such as display -a string, display character by character, or blank the display. -Chars are 7-bit printable ASCII. Strings are C style NUL \0 terminated. -These functions busy-wait for any required timing delays so the caller does -not have to deal with timing. Some displays require long delays which in -some client applications warrant a board and RTOS specific approach to -driving the display, however that is beyond the scope of this API. -*/ - -/* -Return non-zero if board has a display. -*/ -extern int xtbsp_display_exists(void); - -/* -Initialize the display. Must call before any of the following functions. -*/ -extern void xtbsp_display_init(void); - -/* -Display a single char at position pos (0 is leftmost). Other positions are -left untouched. Positions beyond the width of the display are ignored. -*/ -extern void xtbsp_display_char(unsigned pos, const char c); - -/* -Display a string. Blank-pad to or truncate at the end of the display -(overwrites any previous string so don't need to blank display first). -*/ -extern void xtbsp_display_string(const char *s); - -/* -Blank (clear) the entire display. -*/ -extern void xtbsp_display_blank(void); - - - -#ifdef __cplusplus -} -#endif - -#endif /* _XTBSP_H_ */ diff --git a/src/arch/xtensa/include/xtensa/xtensa-libdb-macros.h b/src/arch/xtensa/include/xtensa/xtensa-libdb-macros.h deleted file mode 100644 index 8f67184d3454..000000000000 --- a/src/arch/xtensa/include/xtensa/xtensa-libdb-macros.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * xtensa-libdb-macros.h - */ - -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/Software/libdb/xtensa-libdb-macros.h#1 $ */ - -/* Copyright (c) 2004-2008 Tensilica Inc. 
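Putting the polled UART calls above together, a minimal echo loop; 115200 8N1 is an arbitrary choice and error handling is reduced to bailing out:

#include <xtensa/xtbsp.h>

void uart_echo_forever(void)
{
	xtbsp_board_init();

	if (!xtbsp_uart_exists())
		return;

	/* baud, data bits, parity (0 = none), stop bits; non-zero means failure. */
	if (xtbsp_uart_init(115200, 8, 0, 1) != 0)
		return;

	for (;;) {
		/* Poll so we never block inside getchar without pending data. */
		if (xtbsp_uart_get_isready())
			xtbsp_uart_putchar(xtbsp_uart_getchar());
	}
}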
- - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ - -#ifndef __H_LIBDB_MACROS -#define __H_LIBDB_MACROS - -/* - * This header file provides macros used to construct, identify and use - * "target numbers" that are assigned to various types of Xtensa processor - * registers and states. These target numbers are used by GDB in the remote - * protocol, and are thus used by all GDB debugger agents (targets). - * They are also used in ELF debugger information sections (stabs, dwarf, etc). - * - * These macros are separated from xtensa-libdb.h because they are needed - * by certain debugger agents that do not use or have access to libdb, - * e.g. the OCD daemon, RedBoot, XMON, etc. - * - * For the time being, for compatibility with certain 3rd party debugger - * software vendors, target numbers are limited to 16 bits. It is - * conceivable that this will be extended in the future to 32 bits. 
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef uint32 - #define uint32 unsigned int -#endif -#ifndef int32 - #define int32 int -#endif - - -/* - * Macros to form register "target numbers" for various standard registers/states: - */ -#define XTENSA_DBREGN_INVALID -1 /* not a valid target number */ -#define XTENSA_DBREGN_A(n) (0x0000+(n)) /* address registers a0..a15 */ -#define XTENSA_DBREGN_B(n) (0x0010+(n)) /* boolean bits b0..b15 */ -#define XTENSA_DBREGN_PC 0x0020 /* program counter */ - /* 0x0021 RESERVED for use by Tensilica */ -#define XTENSA_DBREGN_BO(n) (0x0022+(n)) /* boolean octuple-bits bo0..bo1 */ -#define XTENSA_DBREGN_BQ(n) (0x0024+(n)) /* boolean quadruple-bits bq0..bq3 */ -#define XTENSA_DBREGN_BD(n) (0x0028+(n)) /* boolean double-bits bd0..bd7 */ -#define XTENSA_DBREGN_F(n) (0x0030+(n)) /* floating point registers f0..f15 */ -#define XTENSA_DBREGN_VEC(n) (0x0040+(n)) /* Vectra vec regs v0..v15 */ -#define XTENSA_DBREGN_VSEL(n) (0x0050+(n)) /* Vectra sel s0..s3 (V1) ..s7 (V2) */ -#define XTENSA_DBREGN_VALIGN(n) (0x0058+(n)) /* Vectra valign regs u0..u3 */ -#define XTENSA_DBREGN_VCOEFF(n) (0x005C+(n)) /* Vectra I vcoeff regs c0..c1 */ - /* 0x005E..0x005F RESERVED for use by Tensilica */ -#define XTENSA_DBREGN_AEP(n) (0x0060+(n)) /* HiFi2 Audio Engine regs aep0..aep7 */ -#define XTENSA_DBREGN_AEQ(n) (0x0068+(n)) /* HiFi2 Audio Engine regs aeq0..aeq3 */ - /* 0x006C..0x00FF RESERVED for use by Tensilica */ -#define XTENSA_DBREGN_AR(n) (0x0100+(n)) /* physical address regs ar0..ar63 - (note: only with window option) */ - /* 0x0140..0x01FF RESERVED for use by Tensilica */ -#define XTENSA_DBREGN_SREG(n) (0x0200+(n)) /* special registers 0..255 (core) */ -#define XTENSA_DBREGN_BR XTENSA_DBREGN_SREG(0x04) /* all 16 boolean bits, BR */ -#define XTENSA_DBREGN_MR(n) XTENSA_DBREGN_SREG(0x20+(n)) /* MAC16 registers m0..m3 */ -#define XTENSA_DBREGN_UREG(n) (0x0300+(n)) /* user registers 0..255 (TIE) */ - /* 0x0400..0x0FFF RESERVED for use by Tensilica */ - /* 0x1000..0x1FFF user-defined regfiles */ - /* 0x2000..0xEFFF other states (and regfiles) */ -#define XTENSA_DBREGN_DBAGENT(n) (0xF000+(n)) /* non-processor "registers" 0..4095 for - 3rd-party debugger agent defined use */ - /* > 0xFFFF (32-bit) RESERVED for use by Tensilica */ -/*#define XTENSA_DBREGN_CONTEXT(n) (0x02000000+((n)<<20))*/ /* add this macro's value to a target - number to identify a specific context 0..31 - for context-replicated registers */ -#define XTENSA_DBREGN_MASK 0xFFFF /* mask of valid target_number bits */ -#define XTENSA_DBREGN_WRITE_SIDE 0x04000000 /* flag to request write half of a register - split into distinct read and write entries - with the same target number (currently only - valid in a couple of libdb API functions; - see xtensa-libdb.h for details) */ - -/* - * Macros to identify specific ranges of target numbers (formed above): - * NOTE: any context number (or other upper 12 bits) are considered - * modifiers and are thus stripped out for identification purposes. 
- */ -#define XTENSA_DBREGN_IS_VALID(tn) (((tn) & ~0xFFFF) == 0) /* just tests it's 16-bit unsigned */ -#define XTENSA_DBREGN_IS_A(tn) (((tn) & 0xFFF0)==0x0000) /* is a0..a15 */ -#define XTENSA_DBREGN_IS_B(tn) (((tn) & 0xFFF0)==0x0010) /* is b0..b15 */ -#define XTENSA_DBREGN_IS_PC(tn) (((tn) & 0xFFFF)==0x0020) /* is program counter */ -#define XTENSA_DBREGN_IS_BO(tn) (((tn) & 0xFFFE)==0x0022) /* is bo0..bo1 */ -#define XTENSA_DBREGN_IS_BQ(tn) (((tn) & 0xFFFC)==0x0024) /* is bq0..bq3 */ -#define XTENSA_DBREGN_IS_BD(tn) (((tn) & 0xFFF8)==0x0028) /* is bd0..bd7 */ -#define XTENSA_DBREGN_IS_F(tn) (((tn) & 0xFFF0)==0x0030) /* is f0..f15 */ -#define XTENSA_DBREGN_IS_VEC(tn) (((tn) & 0xFFF0)==0x0040) /* is v0..v15 */ -#define XTENSA_DBREGN_IS_VSEL(tn) (((tn) & 0xFFF8)==0x0050) /* is s0..s7 (s0..s3 in V1) */ -#define XTENSA_DBREGN_IS_VALIGN(tn) (((tn) & 0xFFFC)==0x0058) /* is u0..u3 */ -#define XTENSA_DBREGN_IS_VCOEFF(tn) (((tn) & 0xFFFE)==0x005C) /* is c0..c1 */ -#define XTENSA_DBREGN_IS_AEP(tn) (((tn) & 0xFFF8)==0x0060) /* is aep0..aep7 */ -#define XTENSA_DBREGN_IS_AEQ(tn) (((tn) & 0xFFFC)==0x0068) /* is aeq0..aeq3 */ -#define XTENSA_DBREGN_IS_AR(tn) (((tn) & 0xFFC0)==0x0100) /* is ar0..ar63 */ -#define XTENSA_DBREGN_IS_SREG(tn) (((tn) & 0xFF00)==0x0200) /* is special register */ -#define XTENSA_DBREGN_IS_BR(tn) (((tn) & 0xFFFF)==XTENSA_DBREGN_SREG(0x04)) /* is BR */ -#define XTENSA_DBREGN_IS_MR(tn) (((tn) & 0xFFFC)==XTENSA_DBREGN_SREG(0x20)) /* m0..m3 */ -#define XTENSA_DBREGN_IS_UREG(tn) (((tn) & 0xFF00)==0x0300) /* is user register */ -#define XTENSA_DBREGN_IS_DBAGENT(tn) (((tn) & 0xF000)==0xF000) /* is non-processor */ -/*#define XTENSA_DBREGN_IS_CONTEXT(tn) (((tn) & 0x02000000) != 0)*/ /* specifies context # */ - -/* - * Macros to extract register index from a register "target number" - * when a specific range has been identified using one of the _IS_ macros above. - * These macros only return a useful value if the corresponding _IS_ macro returns true. 
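A tiny example of forming and classifying the GDB target numbers described above, assuming the header is still reachable as <xtensa/xtensa-libdb-macros.h>:

#include <stdio.h>
#include <xtensa/xtensa-libdb-macros.h>

int main(void)
{
	int a7 = XTENSA_DBREGN_A(7);	/* 0x0007: address register a7 */
	int br = XTENSA_DBREGN_BR;	/* 0x0204: special register BR */
	int m2 = XTENSA_DBREGN_MR(2);	/* 0x0222: MAC16 register m2   */

	printf("a7=0x%04x  BR=0x%04x  m2=0x%04x\n", a7, br, m2);
	printf("a7 is an A-reg: %d, BR is an SREG: %d\n",
	       XTENSA_DBREGN_IS_A(a7) != 0, XTENSA_DBREGN_IS_SREG(br) != 0);
	return 0;
}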
- */ -#define XTENSA_DBREGN_A_INDEX(tn) ((tn) & 0x0F) /* 0..15 for a0..a15 */ -#define XTENSA_DBREGN_B_INDEX(tn) ((tn) & 0x0F) /* 0..15 for b0..b15 */ -#define XTENSA_DBREGN_BO_INDEX(tn) ((tn) & 0x01) /* 0..1 for bo0..bo1 */ -#define XTENSA_DBREGN_BQ_INDEX(tn) ((tn) & 0x03) /* 0..3 for bq0..bq3 */ -#define XTENSA_DBREGN_BD_INDEX(tn) ((tn) & 0x07) /* 0..7 for bd0..bd7 */ -#define XTENSA_DBREGN_F_INDEX(tn) ((tn) & 0x0F) /* 0..15 for f0..f15 */ -#define XTENSA_DBREGN_VEC_INDEX(tn) ((tn) & 0x0F) /* 0..15 for v0..v15 */ -#define XTENSA_DBREGN_VSEL_INDEX(tn) ((tn) & 0x07) /* 0..7 for s0..s7 */ -#define XTENSA_DBREGN_VALIGN_INDEX(tn) ((tn) & 0x03) /* 0..3 for u0..u3 */ -#define XTENSA_DBREGN_VCOEFF_INDEX(tn) ((tn) & 0x01) /* 0..1 for c0..c1 */ -#define XTENSA_DBREGN_AEP_INDEX(tn) ((tn) & 0x07) /* 0..7 for aep0..aep7 */ -#define XTENSA_DBREGN_AEQ_INDEX(tn) ((tn) & 0x03) /* 0..3 for aeq0..aeq3 */ -#define XTENSA_DBREGN_AR_INDEX(tn) ((tn) & 0x3F) /* 0..63 for ar0..ar63 */ -#define XTENSA_DBREGN_SREG_INDEX(tn) ((tn) & 0xFF) /* 0..255 for special registers */ -#define XTENSA_DBREGN_MR_INDEX(tn) ((tn) & 0x03) /* 0..3 for m0..m3 */ -#define XTENSA_DBREGN_UREG_INDEX(tn) ((tn) & 0xFF) /* 0..255 for user registers */ -#define XTENSA_DBREGN_DBAGENT_INDEX(tn) ((tn) & 0xFFF) /* 0..4095 for non-processor */ -/*#define XTENSA_DBREGN_CONTEXT_INDEX(tn) (((tn) >> 20) & 0x1F)*/ /* 0..31 context numbers */ - - - - -#ifdef __cplusplus -} -#endif - -#endif /* __H_LIBDB_MACROS */ - diff --git a/src/arch/xtensa/include/xtensa/xtensa-versions.h b/src/arch/xtensa/include/xtensa/xtensa-versions.h deleted file mode 100644 index 30f318eb0e6b..000000000000 --- a/src/arch/xtensa/include/xtensa/xtensa-versions.h +++ /dev/null @@ -1,398 +0,0 @@ -/* - xtensa-versions.h -- definitions of Xtensa version and release numbers - - This file defines most Xtensa-related product versions and releases - that exist so far. - It also provides a bit of information about which ones are current. - This file changes every release, as versions/releases get added. - - - $Id: //depot/rel/Foxhill/dot.8/Xtensa/Software/misc/xtensa-versions.h.tpp#1 $ - - Copyright (c) 2006-2010 Tensilica Inc. - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -*/ - -#ifndef XTENSA_VERSIONS_H -#define XTENSA_VERSIONS_H - - -/* - * NOTE: A "release" is a collection of product versions - * made available at once (together) to customers. - * In the past, release and version names all matched in T####.# form, - * making the distinction irrelevant. 
- * Starting with the RA-2004.1 release, this is no longer the case. - */ - - -/* Hardware (Xtensa/Diamond processor) versions: */ -#define XTENSA_HWVERSION_T1020_0 102000 /* versions T1020.0 */ -#define XTENSA_HWCIDSCHEME_T1020_0 10 -#define XTENSA_HWCIDVERS_T1020_0 2 -#define XTENSA_HWVERSION_T1020_1 102001 /* versions T1020.1 */ -#define XTENSA_HWCIDSCHEME_T1020_1 10 -#define XTENSA_HWCIDVERS_T1020_1 3 -#define XTENSA_HWVERSION_T1020_2 102002 /* versions T1020.2 */ -#define XTENSA_HWCIDSCHEME_T1020_2 10 -#define XTENSA_HWCIDVERS_T1020_2 4 -#define XTENSA_HWVERSION_T1020_2B 102002 /* versions T1020.2b */ -#define XTENSA_HWCIDSCHEME_T1020_2B 10 -#define XTENSA_HWCIDVERS_T1020_2B 5 -#define XTENSA_HWVERSION_T1020_3 102003 /* versions T1020.3 */ -#define XTENSA_HWCIDSCHEME_T1020_3 10 -#define XTENSA_HWCIDVERS_T1020_3 6 -#define XTENSA_HWVERSION_T1020_4 102004 /* versions T1020.4 */ -#define XTENSA_HWCIDSCHEME_T1020_4 10 -#define XTENSA_HWCIDVERS_T1020_4 7 -#define XTENSA_HWVERSION_T1030_0 103000 /* versions T1030.0 */ -#define XTENSA_HWCIDSCHEME_T1030_0 10 -#define XTENSA_HWCIDVERS_T1030_0 9 -#define XTENSA_HWVERSION_T1030_1 103001 /* versions T1030.1 */ -#define XTENSA_HWCIDSCHEME_T1030_1 10 -#define XTENSA_HWCIDVERS_T1030_1 10 -#define XTENSA_HWVERSION_T1030_2 103002 /* versions T1030.2 */ -#define XTENSA_HWCIDSCHEME_T1030_2 10 -#define XTENSA_HWCIDVERS_T1030_2 11 -#define XTENSA_HWVERSION_T1030_3 103003 /* versions T1030.3 */ -#define XTENSA_HWCIDSCHEME_T1030_3 10 -#define XTENSA_HWCIDVERS_T1030_3 12 -#define XTENSA_HWVERSION_T1040_0 104000 /* versions T1040.0 */ -#define XTENSA_HWCIDSCHEME_T1040_0 10 -#define XTENSA_HWCIDVERS_T1040_0 15 -#define XTENSA_HWVERSION_T1040_1 104001 /* versions T1040.1 */ -#define XTENSA_HWCIDSCHEME_T1040_1 01 -#define XTENSA_HWCIDVERS_T1040_1 32 -#define XTENSA_HWVERSION_T1040_1P 104001 /* versions T1040.1-prehotfix */ -#define XTENSA_HWCIDSCHEME_T1040_1P 10 -#define XTENSA_HWCIDVERS_T1040_1P 16 -#define XTENSA_HWVERSION_T1040_2 104002 /* versions T1040.2 */ -#define XTENSA_HWCIDSCHEME_T1040_2 01 -#define XTENSA_HWCIDVERS_T1040_2 33 -#define XTENSA_HWVERSION_T1040_3 104003 /* versions T1040.3 */ -#define XTENSA_HWCIDSCHEME_T1040_3 01 -#define XTENSA_HWCIDVERS_T1040_3 34 -#define XTENSA_HWVERSION_T1050_0 105000 /* versions T1050.0 */ -#define XTENSA_HWCIDSCHEME_T1050_0 1100 -#define XTENSA_HWCIDVERS_T1050_0 1 -#define XTENSA_HWVERSION_T1050_1 105001 /* versions T1050.1 */ -#define XTENSA_HWCIDSCHEME_T1050_1 1100 -#define XTENSA_HWCIDVERS_T1050_1 2 -#define XTENSA_HWVERSION_T1050_2 105002 /* versions T1050.2 */ -#define XTENSA_HWCIDSCHEME_T1050_2 1100 -#define XTENSA_HWCIDVERS_T1050_2 4 -#define XTENSA_HWVERSION_T1050_3 105003 /* versions T1050.3 */ -#define XTENSA_HWCIDSCHEME_T1050_3 1100 -#define XTENSA_HWCIDVERS_T1050_3 6 -#define XTENSA_HWVERSION_T1050_4 105004 /* versions T1050.4 */ -#define XTENSA_HWCIDSCHEME_T1050_4 1100 -#define XTENSA_HWCIDVERS_T1050_4 7 -#define XTENSA_HWVERSION_T1050_5 105005 /* versions T1050.5 */ -#define XTENSA_HWCIDSCHEME_T1050_5 1100 -#define XTENSA_HWCIDVERS_T1050_5 8 -#define XTENSA_HWVERSION_RA_2004_1 210000 /* versions LX1.0.0 */ -#define XTENSA_HWCIDSCHEME_RA_2004_1 1100 -#define XTENSA_HWCIDVERS_RA_2004_1 3 -#define XTENSA_HWVERSION_RA_2005_1 210001 /* versions LX1.0.1 */ -#define XTENSA_HWCIDSCHEME_RA_2005_1 1100 -#define XTENSA_HWCIDVERS_RA_2005_1 20 -#define XTENSA_HWVERSION_RA_2005_2 210002 /* versions LX1.0.2 */ -#define XTENSA_HWCIDSCHEME_RA_2005_2 1100 -#define XTENSA_HWCIDVERS_RA_2005_2 21 -#define 
XTENSA_HWVERSION_RA_2005_3 210003 /* versions LX1.0.3, X6.0.3 */ -#define XTENSA_HWCIDSCHEME_RA_2005_3 1100 -#define XTENSA_HWCIDVERS_RA_2005_3 22 -#define XTENSA_HWVERSION_RA_2006_4 210004 /* versions LX1.0.4, X6.0.4 */ -#define XTENSA_HWCIDSCHEME_RA_2006_4 1100 -#define XTENSA_HWCIDVERS_RA_2006_4 23 -#define XTENSA_HWVERSION_RA_2006_5 210005 /* versions LX1.0.5, X6.0.5 */ -#define XTENSA_HWCIDSCHEME_RA_2006_5 1100 -#define XTENSA_HWCIDVERS_RA_2006_5 24 -#define XTENSA_HWVERSION_RA_2006_6 210006 /* versions LX1.0.6, X6.0.6 */ -#define XTENSA_HWCIDSCHEME_RA_2006_6 1100 -#define XTENSA_HWCIDVERS_RA_2006_6 25 -#define XTENSA_HWVERSION_RA_2007_7 210007 /* versions LX1.0.7, X6.0.7 */ -#define XTENSA_HWCIDSCHEME_RA_2007_7 1100 -#define XTENSA_HWCIDVERS_RA_2007_7 26 -#define XTENSA_HWVERSION_RA_2008_8 210008 /* versions LX1.0.8, X6.0.8 */ -#define XTENSA_HWCIDSCHEME_RA_2008_8 1100 -#define XTENSA_HWCIDVERS_RA_2008_8 27 -#define XTENSA_HWVERSION_RB_2006_0 220000 /* versions LX2.0.0, X7.0.0 */ -#define XTENSA_HWCIDSCHEME_RB_2006_0 1100 -#define XTENSA_HWCIDVERS_RB_2006_0 48 -#define XTENSA_HWVERSION_RB_2007_1 220001 /* versions LX2.0.1, X7.0.1 */ -#define XTENSA_HWCIDSCHEME_RB_2007_1 1100 -#define XTENSA_HWCIDVERS_RB_2007_1 49 -#define XTENSA_HWVERSION_RB_2007_2 221000 /* versions LX2.1.0, X7.1.0 */ -#define XTENSA_HWCIDSCHEME_RB_2007_2 1100 -#define XTENSA_HWCIDVERS_RB_2007_2 52 -#define XTENSA_HWVERSION_RB_2008_3 221001 /* versions LX2.1.1, X7.1.1 */ -#define XTENSA_HWCIDSCHEME_RB_2008_3 1100 -#define XTENSA_HWCIDVERS_RB_2008_3 53 -#define XTENSA_HWVERSION_RB_2008_4 221002 /* versions LX2.1.2, X7.1.2 */ -#define XTENSA_HWCIDSCHEME_RB_2008_4 1100 -#define XTENSA_HWCIDVERS_RB_2008_4 54 -#define XTENSA_HWVERSION_RB_2009_5 221003 /* versions LX2.1.3, X7.1.3 */ -#define XTENSA_HWCIDSCHEME_RB_2009_5 1100 -#define XTENSA_HWCIDVERS_RB_2009_5 55 -#define XTENSA_HWVERSION_RB_2007_2_MP 221100 /* versions LX2.1.8-MP, X7.1.8-MP */ -#define XTENSA_HWCIDSCHEME_RB_2007_2_MP 1100 -#define XTENSA_HWCIDVERS_RB_2007_2_MP 64 -#define XTENSA_HWVERSION_RC_2009_0 230000 /* versions LX3.0.0, X8.0.0, MX1.0.0 */ -#define XTENSA_HWCIDSCHEME_RC_2009_0 1100 -#define XTENSA_HWCIDVERS_RC_2009_0 65 -#define XTENSA_HWVERSION_RC_2010_1 230001 /* versions LX3.0.1, X8.0.1, MX1.0.1 */ -#define XTENSA_HWCIDSCHEME_RC_2010_1 1100 -#define XTENSA_HWCIDVERS_RC_2010_1 66 -#define XTENSA_HWVERSION_RC_2010_2 230002 /* versions LX3.0.2, X8.0.2, MX1.0.2 */ -#define XTENSA_HWCIDSCHEME_RC_2010_2 1100 -#define XTENSA_HWCIDVERS_RC_2010_2 67 -#define XTENSA_HWVERSION_RC_2011_3 230003 /* versions LX3.0.3, X8.0.3, MX1.0.3 */ -#define XTENSA_HWCIDSCHEME_RC_2011_3 1100 -#define XTENSA_HWCIDVERS_RC_2011_3 68 -#define XTENSA_HWVERSION_RD_2010_0 240000 /* versions LX4.0.0, X9.0.0, MX1.1.0, TX1.0.0 */ -#define XTENSA_HWCIDSCHEME_RD_2010_0 1100 -#define XTENSA_HWCIDVERS_RD_2010_0 80 -#define XTENSA_HWVERSION_RD_2011_1 240001 /* versions LX4.0.1, X9.0.1, MX1.1.1, TX1.0.1 */ -#define XTENSA_HWCIDSCHEME_RD_2011_1 1100 -#define XTENSA_HWCIDVERS_RD_2011_1 81 -#define XTENSA_HWVERSION_RD_2011_2 240002 /* versions LX4.0.2, X9.0.2, MX1.1.2, TX1.0.2 */ -#define XTENSA_HWCIDSCHEME_RD_2011_2 1100 -#define XTENSA_HWCIDVERS_RD_2011_2 82 -#define XTENSA_HWVERSION_RD_2011_3 240003 /* versions LX4.0.3, X9.0.3, MX1.1.3, TX1.0.3 */ -#define XTENSA_HWCIDSCHEME_RD_2011_3 1100 -#define XTENSA_HWCIDVERS_RD_2011_3 83 -#define XTENSA_HWVERSION_RD_2012_4 240004 /* versions LX4.0.4, X9.0.4, MX1.1.4, TX1.0.4 */ -#define XTENSA_HWCIDSCHEME_RD_2012_4 1100 -#define 
XTENSA_HWCIDVERS_RD_2012_4 84 -#define XTENSA_HWVERSION_RD_2012_5 240005 /* versions LX4.0.5, X9.0.5, MX1.1.5, TX1.0.5 */ -#define XTENSA_HWCIDSCHEME_RD_2012_5 1100 -#define XTENSA_HWCIDVERS_RD_2012_5 85 -#define XTENSA_HWVERSION_RE_2012_0 250000 /* versions LX5.0.0, X10.0.0, MX1.2.0 */ -#define XTENSA_HWCIDSCHEME_RE_2012_0 1100 -#define XTENSA_HWCIDVERS_RE_2012_0 96 -#define XTENSA_HWVERSION_RE_2012_1 250001 /* versions LX5.0.1, X10.0.1, MX1.2.1 */ -#define XTENSA_HWCIDSCHEME_RE_2012_1 1100 -#define XTENSA_HWCIDVERS_RE_2012_1 97 -#define XTENSA_HWVERSION_RE_2013_2 250002 /* versions LX5.0.2, X10.0.2, MX1.2.2 */ -#define XTENSA_HWCIDSCHEME_RE_2013_2 1100 -#define XTENSA_HWCIDVERS_RE_2013_2 98 -#define XTENSA_HWVERSION_RE_2013_3 250003 /* versions LX5.0.3, X10.0.3, MX1.2.3 */ -#define XTENSA_HWCIDSCHEME_RE_2013_3 1100 -#define XTENSA_HWCIDVERS_RE_2013_3 99 -#define XTENSA_HWVERSION_RE_2013_4 250004 /* versions LX5.0.4, X10.0.4, MX1.2.4 */ -#define XTENSA_HWCIDSCHEME_RE_2013_4 1100 -#define XTENSA_HWCIDVERS_RE_2013_4 100 -#define XTENSA_HWVERSION_RE_2014_5 250005 /* versions LX5.0.5, X10.0.5, MX1.2.5 */ -#define XTENSA_HWCIDSCHEME_RE_2014_5 1100 -#define XTENSA_HWCIDVERS_RE_2014_5 101 -#define XTENSA_HWVERSION_RE_2015_6 250006 /* versions LX5.0.6, X10.0.6, MX1.2.6 */ -#define XTENSA_HWCIDSCHEME_RE_2015_6 1100 -#define XTENSA_HWCIDVERS_RE_2015_6 102 -#define XTENSA_HWVERSION_RF_2014_0 260000 /* versions LX6.0.0, X11.0.0, MX1.3.0 */ -#define XTENSA_HWCIDSCHEME_RF_2014_0 1100 -#define XTENSA_HWCIDVERS_RF_2014_0 112 -#define XTENSA_HWVERSION_RF_2014_1 260001 /* versions LX6.0.1, X11.0.1 */ -#define XTENSA_HWCIDSCHEME_RF_2014_1 1100 -#define XTENSA_HWCIDVERS_RF_2014_1 113 -#define XTENSA_HWVERSION_RF_2015_2 260002 /* versions LX6.0.2, X11.0.2 */ -#define XTENSA_HWCIDSCHEME_RF_2015_2 1100 -#define XTENSA_HWCIDVERS_RF_2015_2 114 -#define XTENSA_HWVERSION_RF_2015_3 260003 /* versions LX6.0.3, X11.0.3 */ -#define XTENSA_HWCIDSCHEME_RF_2015_3 1100 -#define XTENSA_HWCIDVERS_RF_2015_3 115 -#define XTENSA_HWVERSION_RF_2016_4 260004 /* versions LX6.0.4, X11.0.4 */ -#define XTENSA_HWCIDSCHEME_RF_2016_4 1100 -#define XTENSA_HWCIDVERS_RF_2016_4 116 -#define XTENSA_HWVERSION_RG_2015_0 270000 /* versions LX7.0.0 */ -#define XTENSA_HWCIDSCHEME_RG_2015_0 1100 -#define XTENSA_HWCIDVERS_RG_2015_0 128 -#define XTENSA_HWVERSION_RG_2015_1 270001 /* versions LX7.0.1 */ -#define XTENSA_HWCIDSCHEME_RG_2015_1 1100 -#define XTENSA_HWCIDVERS_RG_2015_1 129 -#define XTENSA_HWVERSION_RG_2015_2 270002 /* versions LX7.0.2 */ -#define XTENSA_HWCIDSCHEME_RG_2015_2 1100 -#define XTENSA_HWCIDVERS_RG_2015_2 130 -#define XTENSA_HWVERSION_RG_2016_3 270003 /* versions LX7.0.3 */ -#define XTENSA_HWCIDSCHEME_RG_2016_3 1100 -#define XTENSA_HWCIDVERS_RG_2016_3 131 -#define XTENSA_HWVERSION_RG_2016_4 270004 /* versions LX7.0.4 */ -#define XTENSA_HWCIDSCHEME_RG_2016_4 1100 -#define XTENSA_HWCIDVERS_RG_2016_4 132 -#define XTENSA_HWVERSION_RG_2017_5 270005 /* versions LX7.0.5 */ -#define XTENSA_HWCIDSCHEME_RG_2017_5 1100 -#define XTENSA_HWCIDVERS_RG_2017_5 133 -#define XTENSA_HWVERSION_RG_2017_6 270006 /* versions LX7.0.6 */ -#define XTENSA_HWCIDSCHEME_RG_2017_6 1100 -#define XTENSA_HWCIDVERS_RG_2017_6 134 -#define XTENSA_HWVERSION_RG_2017_7 270007 /* versions LX7.0.7 */ -#define XTENSA_HWCIDSCHEME_RG_2017_7 1100 -#define XTENSA_HWCIDVERS_RG_2017_7 135 -#define XTENSA_HWVERSION_RG_2017_8 270008 /* versions LX7.0.8 */ -#define XTENSA_HWCIDSCHEME_RG_2017_8 1100 -#define XTENSA_HWCIDVERS_RG_2017_8 136 -#define XTENSA_HWVERSION_RH_2016_0 280000 /* 
versions LX8.0.0, NX1.0.0, SX1.0.0 */ -#define XTENSA_HWCIDSCHEME_RH_2016_0 1100 -#define XTENSA_HWCIDVERS_RH_2016_0 144 - -/* Software (Xtensa Tools) versions: */ -#define XTENSA_SWVERSION_T1020_0 102000 /* versions T1020.0 */ -#define XTENSA_SWVERSION_T1020_1 102001 /* versions T1020.1 */ -#define XTENSA_SWVERSION_T1020_2 102002 /* versions T1020.2 */ -#define XTENSA_SWVERSION_T1020_2B 102002 /* versions T1020.2b */ -#define XTENSA_SWVERSION_T1020_3 102003 /* versions T1020.3 */ -#define XTENSA_SWVERSION_T1020_4 102004 /* versions T1020.4 */ -#define XTENSA_SWVERSION_T1030_0 103000 /* versions T1030.0 */ -#define XTENSA_SWVERSION_T1030_1 103001 /* versions T1030.1 */ -#define XTENSA_SWVERSION_T1030_2 103002 /* versions T1030.2 */ -#define XTENSA_SWVERSION_T1030_3 103003 /* versions T1030.3 */ -#define XTENSA_SWVERSION_T1040_0 104000 /* versions T1040.0 */ -#define XTENSA_SWVERSION_T1040_1 104001 /* versions T1040.1 */ -#define XTENSA_SWVERSION_T1040_1P 104001 /* versions T1040.1-prehotfix */ -#define XTENSA_SWVERSION_T1040_2 104002 /* versions T1040.2 */ -#define XTENSA_SWVERSION_T1040_3 104003 /* versions T1040.3 */ -#define XTENSA_SWVERSION_T1050_0 105000 /* versions T1050.0 */ -#define XTENSA_SWVERSION_T1050_1 105001 /* versions T1050.1 */ -#define XTENSA_SWVERSION_T1050_2 105002 /* versions T1050.2 */ -#define XTENSA_SWVERSION_T1050_3 105003 /* versions T1050.3 */ -#define XTENSA_SWVERSION_T1050_4 105004 /* versions T1050.4 */ -#define XTENSA_SWVERSION_T1050_5 105005 /* versions T1050.5 */ -#define XTENSA_SWVERSION_RA_2004_1 600000 /* versions 6.0.0 */ -#define XTENSA_SWVERSION_RA_2005_1 600001 /* versions 6.0.1 */ -#define XTENSA_SWVERSION_RA_2005_2 600002 /* versions 6.0.2 */ -#define XTENSA_SWVERSION_RA_2005_3 600003 /* versions 6.0.3 */ -#define XTENSA_SWVERSION_RA_2006_4 600004 /* versions 6.0.4 */ -#define XTENSA_SWVERSION_RA_2006_5 600005 /* versions 6.0.5 */ -#define XTENSA_SWVERSION_RA_2006_6 600006 /* versions 6.0.6 */ -#define XTENSA_SWVERSION_RA_2007_7 600007 /* versions 6.0.7 */ -#define XTENSA_SWVERSION_RA_2008_8 600008 /* versions 6.0.8 */ -#define XTENSA_SWVERSION_RB_2006_0 700000 /* versions 7.0.0 */ -#define XTENSA_SWVERSION_RB_2007_1 700001 /* versions 7.0.1 */ -#define XTENSA_SWVERSION_RB_2007_2 701000 /* versions 7.1.0 */ -#define XTENSA_SWVERSION_RB_2008_3 701001 /* versions 7.1.1 */ -#define XTENSA_SWVERSION_RB_2008_4 701002 /* versions 7.1.2 */ -#define XTENSA_SWVERSION_RB_2009_5 701003 /* versions 7.1.3 */ -#define XTENSA_SWVERSION_RB_2007_2_MP 701100 /* versions 7.1.8-MP */ -#define XTENSA_SWVERSION_RC_2009_0 800000 /* versions 8.0.0 */ -#define XTENSA_SWVERSION_RC_2010_1 800001 /* versions 8.0.1 */ -#define XTENSA_SWVERSION_RC_2010_2 800002 /* versions 8.0.2 */ -#define XTENSA_SWVERSION_RC_2011_3 800003 /* versions 8.0.3 */ -#define XTENSA_SWVERSION_RD_2010_0 900000 /* versions 9.0.0 */ -#define XTENSA_SWVERSION_RD_2011_1 900001 /* versions 9.0.1 */ -#define XTENSA_SWVERSION_RD_2011_2 900002 /* versions 9.0.2 */ -#define XTENSA_SWVERSION_RD_2011_3 900003 /* versions 9.0.3 */ -#define XTENSA_SWVERSION_RD_2012_4 900004 /* versions 9.0.4 */ -#define XTENSA_SWVERSION_RD_2012_5 900005 /* versions 9.0.5 */ -#define XTENSA_SWVERSION_RE_2012_0 1000000 /* versions 10.0.0 */ -#define XTENSA_SWVERSION_RE_2012_1 1000001 /* versions 10.0.1 */ -#define XTENSA_SWVERSION_RE_2013_2 1000002 /* versions 10.0.2 */ -#define XTENSA_SWVERSION_RE_2013_3 1000003 /* versions 10.0.3 */ -#define XTENSA_SWVERSION_RE_2013_4 1000004 /* versions 10.0.4 */ -#define 
XTENSA_SWVERSION_RE_2014_5 1000005 /* versions 10.0.5 */ -#define XTENSA_SWVERSION_RE_2015_6 1000006 /* versions 10.0.6 */ -#define XTENSA_SWVERSION_RF_2014_0 1100000 /* versions 11.0.0 */ -#define XTENSA_SWVERSION_RF_2014_1 1100001 /* versions 11.0.1 */ -#define XTENSA_SWVERSION_RF_2015_2 1100002 /* versions 11.0.2 */ -#define XTENSA_SWVERSION_RF_2015_3 1100003 /* versions 11.0.3 */ -#define XTENSA_SWVERSION_RF_2016_4 1100004 /* versions 11.0.4 */ -#define XTENSA_SWVERSION_RG_2015_0 1200000 /* versions 12.0.0 */ -#define XTENSA_SWVERSION_RG_2015_1 1200001 /* versions 12.0.1 */ -#define XTENSA_SWVERSION_RG_2015_2 1200002 /* versions 12.0.2 */ -#define XTENSA_SWVERSION_RG_2016_3 1200003 /* versions 12.0.3 */ -#define XTENSA_SWVERSION_RG_2016_4 1200004 /* versions 12.0.4 */ -#define XTENSA_SWVERSION_RG_2017_5 1200005 /* versions 12.0.5 */ -#define XTENSA_SWVERSION_RG_2017_6 1200006 /* versions 12.0.6 */ -#define XTENSA_SWVERSION_RG_2017_7 1200007 /* versions 12.0.7 */ -#define XTENSA_SWVERSION_RG_2017_8 1200008 /* versions 12.0.8 */ -#define XTENSA_SWVERSION_RH_2016_0 1300000 /* versions 13.0.0 */ -#define XTENSA_SWVERSION_T1040_1_PREHOTFIX XTENSA_SWVERSION_T1040_1P /* T1040.1-prehotfix */ -#define XTENSA_SWVERSION_6_0_0 XTENSA_SWVERSION_RA_2004_1 /* 6.0.0 */ -#define XTENSA_SWVERSION_6_0_1 XTENSA_SWVERSION_RA_2005_1 /* 6.0.1 */ -#define XTENSA_SWVERSION_6_0_2 XTENSA_SWVERSION_RA_2005_2 /* 6.0.2 */ -#define XTENSA_SWVERSION_6_0_3 XTENSA_SWVERSION_RA_2005_3 /* 6.0.3 */ -#define XTENSA_SWVERSION_6_0_4 XTENSA_SWVERSION_RA_2006_4 /* 6.0.4 */ -#define XTENSA_SWVERSION_6_0_5 XTENSA_SWVERSION_RA_2006_5 /* 6.0.5 */ -#define XTENSA_SWVERSION_6_0_6 XTENSA_SWVERSION_RA_2006_6 /* 6.0.6 */ -#define XTENSA_SWVERSION_6_0_7 XTENSA_SWVERSION_RA_2007_7 /* 6.0.7 */ -#define XTENSA_SWVERSION_6_0_8 XTENSA_SWVERSION_RA_2008_8 /* 6.0.8 */ -#define XTENSA_SWVERSION_7_0_0 XTENSA_SWVERSION_RB_2006_0 /* 7.0.0 */ -#define XTENSA_SWVERSION_7_0_1 XTENSA_SWVERSION_RB_2007_1 /* 7.0.1 */ -#define XTENSA_SWVERSION_7_1_0 XTENSA_SWVERSION_RB_2007_2 /* 7.1.0 */ -#define XTENSA_SWVERSION_7_1_1 XTENSA_SWVERSION_RB_2008_3 /* 7.1.1 */ -#define XTENSA_SWVERSION_7_1_2 XTENSA_SWVERSION_RB_2008_4 /* 7.1.2 */ -#define XTENSA_SWVERSION_7_1_3 XTENSA_SWVERSION_RB_2009_5 /* 7.1.3 */ -#define XTENSA_SWVERSION_7_1_8_MP XTENSA_SWVERSION_RB_2007_2_MP /* 7.1.8-MP */ -#define XTENSA_SWVERSION_8_0_0 XTENSA_SWVERSION_RC_2009_0 /* 8.0.0 */ -#define XTENSA_SWVERSION_8_0_1 XTENSA_SWVERSION_RC_2010_1 /* 8.0.1 */ -#define XTENSA_SWVERSION_8_0_2 XTENSA_SWVERSION_RC_2010_2 /* 8.0.2 */ -#define XTENSA_SWVERSION_8_0_3 XTENSA_SWVERSION_RC_2011_3 /* 8.0.3 */ -#define XTENSA_SWVERSION_9_0_0 XTENSA_SWVERSION_RD_2010_0 /* 9.0.0 */ -#define XTENSA_SWVERSION_9_0_1 XTENSA_SWVERSION_RD_2011_1 /* 9.0.1 */ -#define XTENSA_SWVERSION_9_0_2 XTENSA_SWVERSION_RD_2011_2 /* 9.0.2 */ -#define XTENSA_SWVERSION_9_0_3 XTENSA_SWVERSION_RD_2011_3 /* 9.0.3 */ -#define XTENSA_SWVERSION_9_0_4 XTENSA_SWVERSION_RD_2012_4 /* 9.0.4 */ -#define XTENSA_SWVERSION_9_0_5 XTENSA_SWVERSION_RD_2012_5 /* 9.0.5 */ -#define XTENSA_SWVERSION_10_0_0 XTENSA_SWVERSION_RE_2012_0 /* 10.0.0 */ -#define XTENSA_SWVERSION_10_0_1 XTENSA_SWVERSION_RE_2012_1 /* 10.0.1 */ -#define XTENSA_SWVERSION_10_0_2 XTENSA_SWVERSION_RE_2013_2 /* 10.0.2 */ -#define XTENSA_SWVERSION_10_0_3 XTENSA_SWVERSION_RE_2013_3 /* 10.0.3 */ -#define XTENSA_SWVERSION_10_0_4 XTENSA_SWVERSION_RE_2013_4 /* 10.0.4 */ -#define XTENSA_SWVERSION_10_0_5 XTENSA_SWVERSION_RE_2014_5 /* 10.0.5 */ -#define XTENSA_SWVERSION_10_0_6 
XTENSA_SWVERSION_RE_2015_6 /* 10.0.6 */ -#define XTENSA_SWVERSION_11_0_0 XTENSA_SWVERSION_RF_2014_0 /* 11.0.0 */ -#define XTENSA_SWVERSION_11_0_1 XTENSA_SWVERSION_RF_2014_1 /* 11.0.1 */ -#define XTENSA_SWVERSION_11_0_2 XTENSA_SWVERSION_RF_2015_2 /* 11.0.2 */ -#define XTENSA_SWVERSION_11_0_3 XTENSA_SWVERSION_RF_2015_3 /* 11.0.3 */ -#define XTENSA_SWVERSION_11_0_4 XTENSA_SWVERSION_RF_2016_4 /* 11.0.4 */ -#define XTENSA_SWVERSION_12_0_0 XTENSA_SWVERSION_RG_2015_0 /* 12.0.0 */ -#define XTENSA_SWVERSION_12_0_1 XTENSA_SWVERSION_RG_2015_1 /* 12.0.1 */ -#define XTENSA_SWVERSION_12_0_2 XTENSA_SWVERSION_RG_2015_2 /* 12.0.2 */ -#define XTENSA_SWVERSION_12_0_3 XTENSA_SWVERSION_RG_2016_3 /* 12.0.3 */ -#define XTENSA_SWVERSION_12_0_4 XTENSA_SWVERSION_RG_2016_4 /* 12.0.4 */ -#define XTENSA_SWVERSION_12_0_5 XTENSA_SWVERSION_RG_2017_5 /* 12.0.5 */ -#define XTENSA_SWVERSION_12_0_6 XTENSA_SWVERSION_RG_2017_6 /* 12.0.6 */ -#define XTENSA_SWVERSION_12_0_7 XTENSA_SWVERSION_RG_2017_7 /* 12.0.7 */ -#define XTENSA_SWVERSION_12_0_8 XTENSA_SWVERSION_RG_2017_8 /* 12.0.8 */ -#define XTENSA_SWVERSION_13_0_0 XTENSA_SWVERSION_RH_2016_0 /* 13.0.0 */ - - -/* The current release: */ -#define XTENSA_RELEASE_NAME "RG-2017.8" -#define XTENSA_RELEASE_CANONICAL_NAME "RG-2017.8" - -/* The product versions within the current release: */ -#define XTENSA_SWVERSION XTENSA_SWVERSION_RG_2017_8 -#define XTENSA_SWVERSION_NAME "12.0.8" -#define XTENSA_SWVERSION_NAME_IDENT 12_0_8 -#define XTENSA_SWVERSION_CANONICAL_NAME "12.0.8" -#define XTENSA_SWVERSION_MAJORMID_NAME "12.0" -#define XTENSA_SWVERSION_MAJOR_NAME "12" -/* For product licensing (not necessarily same as *_MAJORMID_NAME): */ -#define XTENSA_SWVERSION_LICENSE_NAME "12.0" - -/* Note: there may be multiple hardware products in one release, - and software can target older hardware, so the notion of - "current" hardware versions is partially configuration dependent. - For now, "current" hardware product version info is left out - to avoid confusion. */ - -#endif /*XTENSA_VERSIONS_H*/ - diff --git a/src/arch/xtensa/include/xtensa/xtensa-xer.h b/src/arch/xtensa/include/xtensa/xtensa-xer.h deleted file mode 100644 index d1a200daf650..000000000000 --- a/src/arch/xtensa/include/xtensa/xtensa-xer.h +++ /dev/null @@ -1,149 +0,0 @@ -/* xer-constants.h -- various constants describing external registers accessed - via wer and rer. - - TODO: find a better prefix. Also conditionalize certain constants based - on number of cores and interrupts actually present. -*/ - -/* - * Copyright (c) 1999-2008 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include - -#define NUM_INTERRUPTS 27 -#define NUM_CORES 4 - -/* Routing of NMI (BInterrupt2) and interrupts 0..n-1 (BInterrupt3+) - RER reads - WER writes - */ - -#define XER_MIROUT 0x0000 -#define XER_MIROUT_LAST (XER_MIROUT + NUM_INTERRUPTS) - - -/* IPI to core M (all 16 causes). - - RER reads - WER clears - */ -#define XER_MIPICAUSE 0x0100 -#define XER_MIPICAUSE_FIELD_A_FIRST 0x0 -#define XER_MIPICAUSE_FIELD_A_LAST 0x0 -#define XER_MIPICAUSE_FIELD_B_FIRST 0x1 -#define XER_MIPICAUSE_FIELD_B_LAST 0x3 -#define XER_MIPICAUSE_FIELD_C_FIRST 0x4 -#define XER_MIPICAUSE_FIELD_C_LAST 0x7 -#define XER_MIPICAUSE_FIELD_D_FIRST 0x8 -#define XER_MIPICAUSE_FIELD_D_LAST 0xF - - -/* IPI from cause bit 0..15 - - RER invalid - WER sets -*/ -#define XER_MIPISET 0x0140 -#define XER_MIPISET_LAST 0x014F - - -/* Global enable - - RER read - WER clear -*/ -#define XER_MIENG 0x0180 - - -/* Global enable - - RER invalid - WER set -*/ -#define XER_MIENG_SET 0x0184 - -/* Global assert - - RER read - WER clear -*/ -#define XER_MIASG 0x0188 - - -/* Global enable - - RER invalid - WER set -*/ -#define XER_MIASG_SET 0x018C - - -/* IPI partition register - - RER read - WER write -*/ -#define XER_PART 0x0190 -#define XER_IPI0 0x0 -#define XER_IPI1 0x1 -#define XER_IPI2 0x2 -#define XER_IPI3 0x3 - -#define XER_PART_ROUTE_IPI(NUM, FIELD) ((NUM) << ((FIELD) << 2)) - -#define XER_PART_ROUTE_IPI_CAUSE(TO_A, TO_B, TO_C, TO_D) \ - (XER_PART_ROUTE_IPI(TO_A, XER_IPI0) | \ - XER_PART_ROUTE_IPI(TO_B, XER_IPI1) | \ - XER_PART_ROUTE_IPI(TO_C, XER_IPI2) | \ - XER_PART_ROUTE_IPI(TO_D, XER_IPI3)) - -#define XER_IPI_WAKE_EXT_INTERRUPT XCHAL_EXTINT0_NUM -#define XER_IPI_WAKE_CAUSE XER_MIPICAUSE_FIELD_C_FIRST -#define XER_IPI_WAKE_ADDRESS (XER_MIPISET + XER_IPI_WAKE_CAUSE) -#define XER_DEFAULT_IPI_ROUTING XER_PART_ROUTE_IPI_CAUSE(XER_IPI1, XER_IPI0, XER_IPI2, XER_IPI3) - - -/* System configuration ID - - RER read - WER invalid -*/ -#define XER_SYSCFGID 0x01A0 - - -/* RunStall to secondary processors - - RER read - WER write -*/ -#define XER_MPSCORE 0x0200 - - -/* Cache coherency ON - - RER read - WER write -*/ -#define XER_CCON 0x0220 - - diff --git a/src/arch/xtensa/include/xtensa/xtruntime-core-state.h b/src/arch/xtensa/include/xtensa/xtruntime-core-state.h deleted file mode 100644 index 94cc719af822..000000000000 --- a/src/arch/xtensa/include/xtensa/xtruntime-core-state.h +++ /dev/null @@ -1,240 +0,0 @@ -/* xtruntime-core-state.h - core state save area (used eg. by PSO) */ -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/include/xtensa/xtruntime-core-state.h#1 $ */ - -/* - * Copyright (c) 2012-2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _XTOS_CORE_STATE_H_ -#define _XTOS_CORE_STATE_H_ - -/* Import STRUCT_xxx macros for defining structures: */ -#include -#include -#include -#if XCHAL_HAVE_IDMA -#include -#endif - -//#define XTOS_PSO_TEST 1 // uncommented for internal PSO testing only - -#define CORE_STATE_SIGNATURE 0xB1C5AFED // pattern that indicates state was saved - - -/* - * Save area for saving entire core state, such as across Power Shut-Off (PSO). - */ - -STRUCT_BEGIN -STRUCT_FIELD (long,4,CS_SA_,signature) // for checking whether state was saved -STRUCT_FIELD (long,4,CS_SA_,restore_label) -STRUCT_FIELD (long,4,CS_SA_,aftersave_label) -STRUCT_AFIELD(long,4,CS_SA_,areg,XCHAL_NUM_AREGS) -#if XCHAL_HAVE_WINDOWED -STRUCT_AFIELD(long,4,CS_SA_,caller_regs,16) // save a max of 16 caller regs -STRUCT_FIELD (long,4,CS_SA_,caller_regs_saved) // flag to show if caller regs saved -#endif -#if XCHAL_HAVE_PSO_CDM -STRUCT_FIELD (long,4,CS_SA_,pwrctl) -#endif -#if XCHAL_HAVE_WINDOWED -STRUCT_FIELD (long,4,CS_SA_,windowbase) -STRUCT_FIELD (long,4,CS_SA_,windowstart) -#endif -STRUCT_FIELD (long,4,CS_SA_,sar) -#if XCHAL_HAVE_EXCEPTIONS -STRUCT_FIELD (long,4,CS_SA_,epc1) -STRUCT_FIELD (long,4,CS_SA_,ps) -STRUCT_FIELD (long,4,CS_SA_,excsave1) -# ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR -STRUCT_FIELD (long,4,CS_SA_,depc) -# endif -#endif -#if XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI >= 2 -STRUCT_AFIELD(long,4,CS_SA_,epc, XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI - 1) -STRUCT_AFIELD(long,4,CS_SA_,eps, XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI - 1) -STRUCT_AFIELD(long,4,CS_SA_,excsave,XCHAL_NUM_INTLEVELS + XCHAL_HAVE_NMI - 1) -#endif -#if XCHAL_HAVE_LOOPS -STRUCT_FIELD (long,4,CS_SA_,lcount) -STRUCT_FIELD (long,4,CS_SA_,lbeg) -STRUCT_FIELD (long,4,CS_SA_,lend) -#endif -#if XCHAL_HAVE_ABSOLUTE_LITERALS -STRUCT_FIELD (long,4,CS_SA_,litbase) -#endif -#if XCHAL_HAVE_VECBASE -STRUCT_FIELD (long,4,CS_SA_,vecbase) -#endif -#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? 
*/ -STRUCT_FIELD (long,4,CS_SA_,atomctl) -#endif -#if XCHAL_HAVE_PREFETCH -STRUCT_FIELD (long,4,CS_SA_,prefctl) -#endif -#if XCHAL_USE_MEMCTL -STRUCT_FIELD (long,4,CS_SA_,memctl) -#endif -#if XCHAL_HAVE_CCOUNT -STRUCT_FIELD (long,4,CS_SA_,ccount) -STRUCT_AFIELD(long,4,CS_SA_,ccompare, XCHAL_NUM_TIMERS) -#endif -#if XCHAL_HAVE_INTERRUPTS -STRUCT_FIELD (long,4,CS_SA_,intenable) -STRUCT_FIELD (long,4,CS_SA_,interrupt) -#endif -#if XCHAL_HAVE_DEBUG -STRUCT_FIELD (long,4,CS_SA_,icount) -STRUCT_FIELD (long,4,CS_SA_,icountlevel) -STRUCT_FIELD (long,4,CS_SA_,debugcause) -// DDR not saved -# if XCHAL_NUM_DBREAK -STRUCT_AFIELD(long,4,CS_SA_,dbreakc, XCHAL_NUM_DBREAK) -STRUCT_AFIELD(long,4,CS_SA_,dbreaka, XCHAL_NUM_DBREAK) -# endif -# if XCHAL_NUM_IBREAK -STRUCT_AFIELD(long,4,CS_SA_,ibreaka, XCHAL_NUM_IBREAK) -STRUCT_FIELD (long,4,CS_SA_,ibreakenable) -# endif -#endif -#if XCHAL_NUM_MISC_REGS -STRUCT_AFIELD(long,4,CS_SA_,misc,XCHAL_NUM_MISC_REGS) -#endif -#if XCHAL_HAVE_MEM_ECC_PARITY -STRUCT_FIELD (long,4,CS_SA_,mepc) -STRUCT_FIELD (long,4,CS_SA_,meps) -STRUCT_FIELD (long,4,CS_SA_,mesave) -STRUCT_FIELD (long,4,CS_SA_,mesr) -STRUCT_FIELD (long,4,CS_SA_,mecr) -STRUCT_FIELD (long,4,CS_SA_,mevaddr) -#endif - -/* We put this ahead of TLB and other TIE state, - to keep it within S32I/L32I offset range. */ -#if XCHAL_HAVE_CP -STRUCT_FIELD (long,4,CS_SA_,cpenable) -#endif - -/* TLB state */ -#if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR -STRUCT_AFIELD(long,4,CS_SA_,tlbs,8*2) -#endif -#if XCHAL_HAVE_PTP_MMU -/* Compute number of auto-refill (ARF) entries as max of I and D, - to simplify TLB save logic. On the unusual configs with - ITLB ARF != DTLB ARF entries, we'll just end up - saving/restoring some extra entries redundantly. */ -# if XCHAL_DTLB_ARF_ENTRIES_LOG2 + XCHAL_ITLB_ARF_ENTRIES_LOG2 > 4 -# define ARF_ENTRIES 8 -# else -# define ARF_ENTRIES 4 -# endif -STRUCT_FIELD (long,4,CS_SA_,ptevaddr) -STRUCT_FIELD (long,4,CS_SA_,rasid) -STRUCT_FIELD (long,4,CS_SA_,dtlbcfg) -STRUCT_FIELD (long,4,CS_SA_,itlbcfg) -/*** WARNING: past this point, field offsets may be larger than S32I/L32I range ***/ -STRUCT_AFIELD(long,4,CS_SA_,tlbs,((4*ARF_ENTRIES+4)*2+3)*2) -# if XCHAL_HAVE_SPANNING_WAY /* MMU v3 */ -STRUCT_AFIELD(long,4,CS_SA_,tlbs_ways56,(4+8)*2*2) -# endif -#endif -/* MPU state */ -#if XCHAL_HAVE_MPU -STRUCT_AFIELD(long,4,CS_SA_,mpuentry,8*XCHAL_MPU_ENTRIES) -STRUCT_FIELD (long,4,CS_SA_,cacheadrdis) -#endif - -#if XCHAL_HAVE_IDMA -STRUCT_AFIELD(long,4,CS_SA_,idmaregs, IDMA_PSO_SAVE_SIZE) -#endif - -/* TIE state */ -/* NOTE: NCP area is aligned to XCHAL_TOTAL_SA_ALIGN not XCHAL_NCP_SA_ALIGN, - because the offsets of all subsequent coprocessor save areas are relative - to the NCP save area. 
*/ -STRUCT_AFIELD_A(char,1,XCHAL_TOTAL_SA_ALIGN,CS_SA_,ncp,XCHAL_NCP_SA_SIZE) -#if XCHAL_HAVE_CP -#if XCHAL_CP0_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP0_SA_ALIGN,CS_SA_,cp0,XCHAL_CP0_SA_SIZE) -#endif -#if XCHAL_CP1_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP1_SA_ALIGN,CS_SA_,cp1,XCHAL_CP1_SA_SIZE) -#endif -#if XCHAL_CP2_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP2_SA_ALIGN,CS_SA_,cp2,XCHAL_CP2_SA_SIZE) -#endif -#if XCHAL_CP3_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP3_SA_ALIGN,CS_SA_,cp3,XCHAL_CP3_SA_SIZE) -#endif -#if XCHAL_CP4_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP4_SA_ALIGN,CS_SA_,cp4,XCHAL_CP4_SA_SIZE) -#endif -#if XCHAL_CP5_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP5_SA_ALIGN,CS_SA_,cp5,XCHAL_CP5_SA_SIZE) -#endif -#if XCHAL_CP6_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP6_SA_ALIGN,CS_SA_,cp6,XCHAL_CP6_SA_SIZE) -#endif -#if XCHAL_CP7_SA_SIZE > 0 -STRUCT_AFIELD_A(char,1,XCHAL_CP7_SA_ALIGN,CS_SA_,cp7,XCHAL_CP7_SA_SIZE) -#endif -//STRUCT_AFIELD_A(char,1,XCHAL_CP8_SA_ALIGN,CS_SA_,cp8,XCHAL_CP8_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP9_SA_ALIGN,CS_SA_,cp9,XCHAL_CP9_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP10_SA_ALIGN,CS_SA_,cp10,XCHAL_CP10_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP11_SA_ALIGN,CS_SA_,cp11,XCHAL_CP11_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP12_SA_ALIGN,CS_SA_,cp12,XCHAL_CP12_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP13_SA_ALIGN,CS_SA_,cp13,XCHAL_CP13_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP14_SA_ALIGN,CS_SA_,cp14,XCHAL_CP14_SA_SIZE) -//STRUCT_AFIELD_A(char,1,XCHAL_CP15_SA_ALIGN,CS_SA_,cp15,XCHAL_CP15_SA_SIZE) -#endif - -STRUCT_END(XtosCoreState) - - - -// These are part of non-coprocessor state (ncp): -#if XCHAL_HAVE_MAC16 -//STRUCT_FIELD (long,4,CS_SA_,acclo) -//STRUCT_FIELD (long,4,CS_SA_,acchi) -//STRUCT_AFIELD(long,4,CS_SA_,mr, 4) -#endif -#if XCHAL_HAVE_THREADPTR -//STRUCT_FIELD (long,4,CS_SA_,threadptr) -#endif -#if XCHAL_HAVE_S32C1I -//STRUCT_FIELD (long,4,CS_SA_,scompare1) -#endif -#if XCHAL_HAVE_BOOLEANS -//STRUCT_FIELD (long,4,CS_SA_,br) -#endif - -// Not saved: -// EXCCAUSE ?? -// DEBUGCAUSE ?? -// EXCVADDR ?? -// DDR -// INTERRUPT -// ... locked cache lines ... - -#endif /* _XTOS_CORE_STATE_H_ */ - diff --git a/src/arch/xtensa/include/xtensa/xtruntime-frames.h b/src/arch/xtensa/include/xtensa/xtruntime-frames.h deleted file mode 100644 index 0b6e0d181821..000000000000 --- a/src/arch/xtensa/include/xtensa/xtruntime-frames.h +++ /dev/null @@ -1,202 +0,0 @@ -/* xtruntime-frames.h - exception stack frames for single-threaded run-time */ -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/include/xtensa/xtruntime-frames.h#1 $ */ - -/* - * Copyright (c) 2002-2012 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef _XTRUNTIME_FRAMES_H_ -#define _XTRUNTIME_FRAMES_H_ - -#include - -/* Macros that help define structures for both C and assembler: */ -#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) -#define STRUCT_BEGIN .pushsection .text; .struct 0 -#define STRUCT_FIELD(ctype,size,pre,name) pre##name: .space size -#define STRUCT_AFIELD(ctype,size,pre,name,n) pre##name: .if n ; .space (size)*(n) ; .endif -#define STRUCT_AFIELD_A(ctype,size,align,pre,name,n) .balign align ; pre##name: .if n ; .space (size)*(n) ; .endif -#define STRUCT_END(sname) sname##Size:; .popsection -#else /*_ASMLANGUAGE||__ASSEMBLER__*/ -#define STRUCT_BEGIN typedef struct { -#define STRUCT_FIELD(ctype,size,pre,name) ctype name; -#define STRUCT_AFIELD(ctype,size,pre,name,n) ctype name[n]; -#define STRUCT_AFIELD_A(ctype,size,align,pre,name,n) ctype name[n] __attribute__((aligned(align))); -#define STRUCT_END(sname) } sname; -#endif /*_ASMLANGUAGE||__ASSEMBLER__*/ - -/* Coprocessors masks. - * NOTE: currently only 2 supported. - */ -#define CP0_MASK (1 << 0) -#define CP1_MASK (1 << 1) - -/* - * Kernel vector mode exception stack frame. - * - * NOTE: due to the limited range of addi used in the current - * kernel exception vector, and the fact that historically - * the vector is limited to 12 bytes, the size of this - * stack frame is limited to 128 bytes (currently at 64). - */ -STRUCT_BEGIN -STRUCT_FIELD (long,4,KEXC_,pc) /* "parm" */ -STRUCT_FIELD (long,4,KEXC_,ps) -STRUCT_AFIELD(long,4,KEXC_,areg, 4) /* a12 .. a15 */ -STRUCT_FIELD (long,4,KEXC_,sar) /* "save" */ -#if XCHAL_HAVE_LOOPS -STRUCT_FIELD (long,4,KEXC_,lcount) -STRUCT_FIELD (long,4,KEXC_,lbeg) -STRUCT_FIELD (long,4,KEXC_,lend) -#endif -#if XCHAL_HAVE_MAC16 -STRUCT_FIELD (long,4,KEXC_,acclo) -STRUCT_FIELD (long,4,KEXC_,acchi) -STRUCT_AFIELD(long,4,KEXC_,mr, 4) -#endif -STRUCT_END(KernelFrame) - - -/* - * User vector mode exception stack frame: - * - * WARNING: if you modify this structure, you MUST modify the - * computation of the pad size (ALIGNPAD) accordingly. 
- */ -STRUCT_BEGIN -STRUCT_FIELD (long,4,UEXC_,pc) -STRUCT_FIELD (long,4,UEXC_,ps) -STRUCT_FIELD (long,4,UEXC_,sar) -STRUCT_FIELD (long,4,UEXC_,vpri) -STRUCT_FIELD (long,4,UEXC_,a0) -STRUCT_FIELD (long,4,UEXC_,a1) -STRUCT_FIELD (long,4,UEXC_,a2) -STRUCT_FIELD (long,4,UEXC_,a3) -STRUCT_FIELD (long,4,UEXC_,a4) -STRUCT_FIELD (long,4,UEXC_,a5) -STRUCT_FIELD (long,4,UEXC_,a6) -STRUCT_FIELD (long,4,UEXC_,a7) -STRUCT_FIELD (long,4,UEXC_,a8) -STRUCT_FIELD (long,4,UEXC_,a9) -STRUCT_FIELD (long,4,UEXC_,a10) -STRUCT_FIELD (long,4,UEXC_,a11) -STRUCT_FIELD (long,4,UEXC_,a12) -STRUCT_FIELD (long,4,UEXC_,a13) -STRUCT_FIELD (long,4,UEXC_,a14) -STRUCT_FIELD (long,4,UEXC_,a15) -STRUCT_FIELD (long,4,UEXC_,exccause) /* NOTE: can probably rid of this one (pass direct) */ -STRUCT_FIELD (long,4,UEXC_,align1) /* alignment to 8 bytes */ -#if XCHAL_HAVE_LOOPS -STRUCT_FIELD (long,4,UEXC_,lcount) -STRUCT_FIELD (long,4,UEXC_,lbeg) -STRUCT_FIELD (long,4,UEXC_,lend) -STRUCT_FIELD (long,4,UEXC_,align2) /* alignment to 8 bytes */ -#endif -#if XCHAL_HAVE_MAC16 -STRUCT_FIELD (long,4,UEXC_,acclo) -STRUCT_FIELD (long,4,UEXC_,acchi) -STRUCT_AFIELD(long,4,UEXC_,mr, 4) -#endif -#if (XCHAL_CP_MASK & CP0_MASK) -STRUCT_AFIELD_A (long,4,XCHAL_TOTAL_SA_ALIGN,UEXC_,cp0, XCHAL_CP0_SA_SIZE / 4) -#endif -#if (XCHAL_CP_MASK & CP1_MASK) -STRUCT_AFIELD_A (long,4,XCHAL_TOTAL_SA_ALIGN,UEXC_,cp1, XCHAL_CP1_SA_SIZE / 4) -#endif -/* ALIGNPAD is the 16-byte alignment padding. */ -#define ALIGNPAD ((2 + XCHAL_HAVE_MAC16*2 + ((XCHAL_CP0_SA_SIZE%16)/4) + ((XCHAL_CP1_SA_SIZE%16)/4)) & 3) -#if ALIGNPAD -STRUCT_AFIELD(long,4,UEXC_,pad, ALIGNPAD) /* 16-byte alignment padding */ -#endif -/*STRUCT_AFIELD_A(char,1,XCHAL_CPEXTRA_SA_ALIGN,UEXC_,ureg, (XCHAL_CPEXTRA_SA_SIZE+3)&-4)*/ /* not used */ -STRUCT_END(UserFrame) - -/* - * xtos_structures_pointers contains ptrs to all structures created for - * each processor individually. - * - * To access the core specific structure from ASM (after threadptr is set): - * xtos_addr_percore a13, xtos_interrupt_table - */ -STRUCT_BEGIN -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_enabled) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_intstruct) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_interrupt_table) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_interrupt_mask_table) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_stack_for_interrupt_1) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_stack_for_interrupt_2) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_stack_for_interrupt_3) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_stack_for_interrupt_4) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_stack_for_interrupt_5) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_interrupt_ctx) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_saved_ctx) -STRUCT_FIELD(void*,4,XTOS_PTR_TO_,xtos_saved_sp) -STRUCT_END(xtos_structures_pointers) - -/* - * xtos_task_context contains information about currently - * executed task - */ - -#define XTOS_TASK_CONTEXT_OWN_STACK 1 - -STRUCT_BEGIN -STRUCT_FIELD (UserFrame*,4,TC_,stack_pointer) -STRUCT_FIELD (void*,4,TC_,stack_base) -STRUCT_FIELD (long,4,TC_,stack_size) -STRUCT_FIELD (long,4,TC_,flags) -STRUCT_END(xtos_task_context) - -#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) - - -/* Check for UserFrameSize small enough not to require rounding...: */ - /* Skip 16-byte save area, then 32-byte space for 8 regs of call12 - * (which overlaps with 16-byte GCC nested func chaining area), - * then exception stack frame: */ - .set UserFrameTotalSize, 16+32+UserFrameSize - /* Greater than 112 bytes? 
(max range of ADDI, both signs, when aligned to 16 bytes): */ - .ifgt UserFrameTotalSize-112 - /* Round up to 256-byte multiple to accelerate immediate adds: */ - .set UserFrameTotalSize, ((UserFrameTotalSize+255) & 0xFFFFFF00) - .endif -# define ESF_TOTALSIZE UserFrameTotalSize - -#endif /* _ASMLANGUAGE || __ASSEMBLER__ */ - - -#if XCHAL_NUM_CONTEXTS > 1 -/* Structure of info stored on new context's stack for setup: */ -STRUCT_BEGIN -STRUCT_FIELD (long,4,INFO_,sp) -STRUCT_FIELD (long,4,INFO_,arg1) -STRUCT_FIELD (long,4,INFO_,funcpc) -STRUCT_FIELD (long,4,INFO_,prevps) -STRUCT_END(SetupInfo) -#endif - - -#define KERNELSTACKSIZE 1024 - - -#endif /* _XTRUNTIME_FRAMES_H_ */ - diff --git a/src/arch/xtensa/include/xtensa/xtruntime.h b/src/arch/xtensa/include/xtensa/xtruntime.h deleted file mode 100644 index d5d1f1dd3ff2..000000000000 --- a/src/arch/xtensa/include/xtensa/xtruntime.h +++ /dev/null @@ -1,237 +0,0 @@ -/* - * xtruntime.h -- general C definitions for single-threaded run-time - * - * Copyright (c) 2002-2013 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTRUNTIME_H -#define XTRUNTIME_H - -#include -#include -#include - -#ifndef XTSTR -#define _XTSTR(x) # x -#define XTSTR(x) _XTSTR(x) -#endif - -/* _xtos_core_shutoff() flags parameter values: */ -#define XTOS_KEEPON_MEM 0x00000100 /* ==PWRCTL_MEM_WAKEUP */ -#define XTOS_KEEPON_MEM_SHIFT 8 -#define XTOS_KEEPON_DEBUG 0x00001000 /* ==PWRCTL_DEBUG_WAKEUP */ -#define XTOS_KEEPON_DEBUG_SHIFT 12 - -#define XTOS_IDMA_NO_WAIT 0x00010000 /* Do not wait for idma to finish. Disable if necessary */ -#define XTOS_IDMA_WAIT_STANDBY 0x00020000 /* Also treat standby state as the end of wait */ - -#define XTOS_COREF_PSO 0x00000001 /* do power shutoff */ -#define XTOS_COREF_PSO_SHIFT 0 - -#define _xtos_set_execption_handler _xtos_set_exception_handler /* backward compatibility */ -#define _xtos_set_saved_intenable _xtos_ints_on /* backward compatibility */ -#define _xtos_clear_saved_intenable _xtos_ints_off /* backward compatibility */ - -#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__) - -#ifdef __cplusplus -extern "C" { -#endif - -#if defined(XTOS_MISRA) -typedef void (_xtos_handler_func)(void *); -#elif defined(__cplusplus) -typedef void (_xtos_handler_func)(...); -#else -typedef void (_xtos_handler_func)(); -#endif -typedef _xtos_handler_func *_xtos_handler; - -/* - * unsigned XTOS_SET_INTLEVEL(int intlevel); - * This macro sets the current interrupt level. 
- * The 'intlevel' parameter must be a constant. - * This macro returns a 32-bit value that must be passed to - * XTOS_RESTORE_INTLEVEL() to restore the previous interrupt level. - * XTOS_RESTORE_JUST_INTLEVEL() also does this, but in XEA2 configs - * it restores only PS.INTLEVEL rather than the entire PS register - * and thus is slower. - */ -#if !XCHAL_HAVE_INTERRUPTS -# define XTOS_SET_INTLEVEL(intlevel) 0 -# define XTOS_SET_MIN_INTLEVEL(intlevel) 0 -# define XTOS_RESTORE_INTLEVEL(restoreval) -# define XTOS_RESTORE_JUST_INTLEVEL(restoreval) -#elif XCHAL_HAVE_XEA2 -/* In XEA2, we can simply safely set PS.INTLEVEL directly: */ -/* NOTE: these asm macros don't modify memory, but they are marked - * as such to act as memory access barriers to the compiler because - * these macros are sometimes used to delineate critical sections; - * function calls are natural barriers (the compiler does not know - * whether a function modifies memory) unless declared to be inlined. */ -# define XTOS_SET_INTLEVEL(intlevel) ({ unsigned __tmp; \ - __asm__ __volatile__( "rsil %0, " XTSTR(intlevel) "\n" \ - : "=a" (__tmp) : : "memory" ); \ - __tmp;}) -# define XTOS_SET_MIN_INTLEVEL(intlevel) ({ unsigned __tmp, __tmp2, __tmp3; \ - __asm__ __volatile__( "rsr.ps %0\n" /* get old (current) PS.INTLEVEL */ \ - "movi %2, " XTSTR(intlevel) "\n" \ - "extui %1, %0, 0, 4\n" /* keep only INTLEVEL bits of parameter */ \ - "blt %2, %1, 1f\n" \ - "rsil %0, " XTSTR(intlevel) "\n" \ - "1:\n" \ - : "=a" (__tmp), "=&a" (__tmp2), "=&a" (__tmp3) : : "memory" ); \ - __tmp;}) -# define XTOS_RESTORE_INTLEVEL(restoreval) do{ unsigned __tmp = (restoreval); \ - __asm__ __volatile__( "wsr.ps %0 ; rsync\n" \ - : : "a" (__tmp) : "memory" ); \ - }while(0) -# define XTOS_RESTORE_JUST_INTLEVEL(restoreval) _xtos_set_intlevel(restoreval) -#else -/* In XEA1, we have to rely on INTENABLE register virtualization: */ -extern unsigned _xtos_set_vpri( unsigned vpri ); -extern unsigned _xtos_vpri_enabled; /* current virtual priority */ -# define XTOS_SET_INTLEVEL(intlevel) _xtos_set_vpri(~XCHAL_INTLEVEL_ANDBELOW_MASK(intlevel)) -# define XTOS_SET_MIN_INTLEVEL(intlevel) _xtos_set_vpri(_xtos_vpri_enabled & ~XCHAL_INTLEVEL_ANDBELOW_MASK(intlevel)) -# define XTOS_RESTORE_INTLEVEL(restoreval) _xtos_set_vpri(restoreval) -# define XTOS_RESTORE_JUST_INTLEVEL(restoreval) _xtos_set_vpri(restoreval) -#endif - -/* - * The following macros build upon the above. They are generally used - * instead of invoking the SET_INTLEVEL and SET_MIN_INTLEVEL macros directly. - * They all return a value that can be used with XTOS_RESTORE_INTLEVEL() - * or _xtos_restore_intlevel() or _xtos_restore_just_intlevel() to restore - * the effective interrupt level to what it was before the macro was invoked. - * In XEA2, the DISABLE macros are much faster than the MASK macros - * (in all configs, DISABLE sets the effective interrupt level, whereas MASK - * makes ensures the effective interrupt level is at least the level given - * without lowering it; in XEA2 with INTENABLE virtualization, these macros - * affect PS.INTLEVEL only, not the virtual priority, so DISABLE has partial - * MASK semantics). - * - * A typical critical section sequence might be: - * unsigned rval = XTOS_DISABLE_EXCM_INTERRUPTS; - * ... critical section ... 
- * XTOS_RESTORE_INTLEVEL(rval); - */ -/* Enable all interrupts (those activated with _xtos_ints_on()): */ -#define XTOS_ENABLE_INTERRUPTS XTOS_SET_INTLEVEL(0) -/* Disable low priority level interrupts (they can interact with the OS): */ -#define XTOS_DISABLE_LOWPRI_INTERRUPTS XTOS_SET_INTLEVEL(XCHAL_NUM_LOWPRI_LEVELS) -#define XTOS_MASK_LOWPRI_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XCHAL_NUM_LOWPRI_LEVELS) -/* Disable interrupts that can interact with the OS: */ -#define XTOS_DISABLE_EXCM_INTERRUPTS XTOS_SET_INTLEVEL(XCHAL_EXCM_LEVEL) -#define XTOS_MASK_EXCM_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XCHAL_EXCM_LEVEL) -#if 0 /* XTOS_LOCK_LEVEL is not exported to applications */ -/* Disable interrupts that can interact with the OS, or manipulate virtual INTENABLE: */ -#define XTOS_DISABLE_LOCK_INTERRUPTS XTOS_SET_INTLEVEL(XTOS_LOCK_LEVEL) -#define XTOS_MASK_LOCK_INTERRUPTS XTOS_SET_MIN_INTLEVEL(XTOS_LOCK_LEVEL) -#endif -/* Disable ALL interrupts (not for common use, particularly if one's processor - * configuration has high-level interrupts and one cares about their latency): */ -#define XTOS_DISABLE_ALL_INTERRUPTS XTOS_SET_INTLEVEL(15) - -/* These two are deprecated. Use the newer functions below. */ -extern unsigned int _xtos_ints_off( unsigned int mask ); -extern unsigned int _xtos_ints_on( unsigned int mask ); - -/* Newer functions to enable/disable the specified interrupt. */ -static inline void _xtos_interrupt_enable(unsigned int intnum) -{ - _xtos_ints_on(1 << intnum); -} - -static inline void _xtos_interrupt_disable(unsigned int intnum) -{ - _xtos_ints_off(1 << intnum); -} - -extern unsigned _xtos_set_intlevel( int intlevel ); -extern unsigned _xtos_set_min_intlevel( int intlevel ); -extern unsigned _xtos_restore_intlevel( unsigned restoreval ); -extern unsigned _xtos_restore_just_intlevel( unsigned restoreval ); -extern _xtos_handler _xtos_set_interrupt_handler( int n, _xtos_handler f ); -extern _xtos_handler _xtos_set_interrupt_handler_arg( int n, _xtos_handler f, void *arg ); -extern _xtos_handler _xtos_set_exception_handler( int n, _xtos_handler f ); - -extern void _xtos_memep_initrams( void ); -extern void _xtos_memep_enable( int flags ); - -/* For use with the tiny LSP (see LSP reference manual). */ -#if XCHAL_NUM_INTLEVELS >= 1 -extern void _xtos_dispatch_level1_interrupts( void ); -#endif -#if XCHAL_NUM_INTLEVELS >= 2 -extern void _xtos_dispatch_level2_interrupts( void ); -#endif -#if XCHAL_NUM_INTLEVELS >= 3 -extern void _xtos_dispatch_level3_interrupts( void ); -#endif -#if XCHAL_NUM_INTLEVELS >= 4 -extern void _xtos_dispatch_level4_interrupts( void ); -#endif -#if XCHAL_NUM_INTLEVELS >= 5 -extern void _xtos_dispatch_level5_interrupts( void ); -#endif -#if XCHAL_NUM_INTLEVELS >= 6 -extern void _xtos_dispatch_level6_interrupts( void ); -#endif - -/* Deprecated (but kept because they were documented): */ -extern unsigned int _xtos_read_ints( void ); -extern void _xtos_clear_ints( unsigned int mask ); - - -/* Power shut-off related routines. 
*/ -extern int _xtos_core_shutoff(unsigned flags); -extern int _xtos_core_save(unsigned flags, XtosCoreState *savearea, void *code); -extern void _xtos_core_restore(unsigned retvalue, XtosCoreState *savearea); - - -#if XCHAL_NUM_CONTEXTS > 1 -extern unsigned _xtos_init_context(int context_num, int stack_size, - _xtos_handler_func *start_func, int arg1); -#endif - -/* Deprecated: */ -#if XCHAL_NUM_TIMERS > 0 -extern void _xtos_timer_0_delta( int cycles ); -#endif -#if XCHAL_NUM_TIMERS > 1 -extern void _xtos_timer_1_delta( int cycles ); -#endif -#if XCHAL_NUM_TIMERS > 2 -extern void _xtos_timer_2_delta( int cycles ); -#endif -#if XCHAL_NUM_TIMERS > 3 -extern void _xtos_timer_3_delta( int cycles ); -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* !_ASMLANGUAGE && !__ASSEMBLER__ */ - -#endif /* XTRUNTIME_H */ - diff --git a/src/arch/xtensa/init.c b/src/arch/xtensa/init.c deleted file mode 100644 index 2f15c4b67362..000000000000 --- a/src/arch/xtensa/init.c +++ /dev/null @@ -1,210 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2016 Intel Corporation. All rights reserved. -// -// Author: Liam Girdwood - -/** - * \file arch/xtensa/init.c - * \brief Xtensa initialization functions - * \authors Liam Girdwood - */ - -#include "xtos-internal.h" -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include - -/* UserFrame's size needs to be 16 bytes aligned */ -STATIC_ASSERT((sizeof(UserFrame) % 16) == 0, invalid_UserFrame_alignment); - -/* verify xtos_active_task offset */ -STATIC_ASSERT(offsetof(struct thread_data, xtos_active_task) == - XTOS_TASK_CONTEXT_OFFSET, invalid_xtos_active_task_offset); - -#if CONFIG_DEBUG_LOCKS -/** \brief Debug lock. */ -uint32_t lock_dbg_atomic; - -/** \brief Debug locks per user. */ -uint32_t lock_dbg_user[DBG_LOCK_USERS] = {0}; -#endif -#if CONFIG_NO_SECONDARY_CORE_ROM -void *shared_vecbase_ptr __aligned(PLATFORM_DCACHE_ALIGN); -#endif -/** \brief Core context for primary core. */ -static struct core_context primary_core_ctx; - -/** \brief Core context pointers for all the cores. */ -struct core_context *core_ctx_ptr[CONFIG_CORE_COUNT] = { 0 }; - -/** \brief Xtos core data for primary core. */ -struct xtos_core_data primary_core_data; - -/** \brief Xtos core data pointers for all the cores. */ -struct xtos_core_data *core_data_ptr[CONFIG_CORE_COUNT] = { 0 }; - -/** - * \brief Initializes core specific data. 
- */ -static void initialize_pointers_per_core(void) -{ - int core = cpu_get_id(); - struct xtos_core_data *core_data; - xtos_structures_pointers *p; - - if (core == PLATFORM_PRIMARY_CORE_ID) { - primary_core_data.thread_data_ptr = &primary_core_ctx.td; - core_ctx_ptr[PLATFORM_PRIMARY_CORE_ID] = &primary_core_ctx; - core_data_ptr[PLATFORM_PRIMARY_CORE_ID] = &primary_core_data; - } - - cpu_write_threadptr((int)core_ctx_ptr[core]); - - core_data = core_data_ptr[core]; - - p = &core_data->thread_data_ptr->xtos_ptrs; - p->xtos_interrupt_ctx = &core_data->xtos_interrupt_ctx; - p->xtos_saved_sp = &core_data->xtos_saved_sp; -#if CONFIG_XT_INTERRUPT_LEVEL_1 - p->xtos_stack_for_interrupt_1 = core_data->xtos_stack_for_interrupt_1; -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_2 - p->xtos_stack_for_interrupt_2 = core_data->xtos_stack_for_interrupt_2; -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_3 - p->xtos_stack_for_interrupt_3 = core_data->xtos_stack_for_interrupt_3; -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_4 - p->xtos_stack_for_interrupt_4 = core_data->xtos_stack_for_interrupt_4; -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_5 - p->xtos_stack_for_interrupt_5 = core_data->xtos_stack_for_interrupt_5; -#endif -#if CONFIG_MULTICORE - p->xtos_enabled = &core_data->xtos_int_data.xtos_enabled; - p->xtos_intstruct = &core_data->xtos_int_data; - p->xtos_interrupt_table = - &core_data->xtos_int_data.xtos_interrupt_table.array[0]; - p->xtos_interrupt_mask_table = - &core_data->xtos_int_data.xtos_interrupt_mask_table[0]; -#endif -} - -/** - * \brief Called in the case of exception. - */ -static void exception(void) -{ - uintptr_t epc1; - - __asm__ __volatile__("rsr %0, EPC1" : "=a" (epc1) : : "memory"); - - /* now save panic dump */ - /* TODO: we could invoke a GDB stub here */ - panic_dump(SOF_IPC_PANIC_EXCEPTION, NULL, &epc1); -} - -/** - * \brief Registers exception handlers. 
- */ -static void register_exceptions(void) -{ - - /* 0 - 9 */ - _xtos_set_exception_handler( - EXCCAUSE_ILLEGAL, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_SYSCALL, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_INSTR_ERROR, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_LOAD_STORE_ERROR, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_ALLOCA, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_DIVIDE_BY_ZERO, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_SPECULATION, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_PRIVILEGED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_UNALIGNED, (void *)&exception); - - /* Reserved 10..11 */ - - _xtos_set_exception_handler( - EXCCAUSE_INSTR_DATA_ERROR, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_LOAD_STORE_DATA_ERROR, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_INSTR_ADDR_ERROR, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_LOAD_STORE_ADDR_ERROR, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_ITLB_MISS, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_ITLB_MULTIHIT, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_INSTR_RING, (void *)&exception); - - /* Reserved 19 */ - - _xtos_set_exception_handler( - EXCCAUSE_INSTR_PROHIBITED, (void *)&exception); - - /* Reserved 21..23 */ - _xtos_set_exception_handler( - EXCCAUSE_DTLB_MISS, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_DTLB_MULTIHIT, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_LOAD_STORE_RING, (void *)&exception); - - /* Reserved 27 */ - _xtos_set_exception_handler( - EXCCAUSE_LOAD_PROHIBITED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_STORE_PROHIBITED, (void *)&exception); - - /* Reserved 30..31 */ - _xtos_set_exception_handler( - EXCCAUSE_CP0_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP1_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP2_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP3_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP4_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP5_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP6_DISABLED, (void *)&exception); - _xtos_set_exception_handler( - EXCCAUSE_CP7_DISABLED, (void *)&exception); - - /* Reserved 40..63 */ -} - -/** - * \brief Initializes architecture. - * \return Error status. - */ -int arch_init(void) -{ - initialize_pointers_per_core(); - register_exceptions(); - return 0; -} diff --git a/src/arch/xtensa/lib/CMakeLists.txt b/src/arch/xtensa/lib/CMakeLists.txt deleted file mode 100644 index f31326299f33..000000000000 --- a/src/arch/xtensa/lib/CMakeLists.txt +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -add_local_sources(sof notifier.c) - -if (CONFIG_AMS) - add_local_sources(sof ams.c) -endif() - -if (CONFIG_MULTICORE) - add_local_sources(sof cpu.c) -endif() diff --git a/src/arch/xtensa/lib/ams.c b/src/arch/xtensa/lib/ams.c deleted file mode 100644 index 87fc1f482f2f..000000000000 --- a/src/arch/xtensa/lib/ams.c +++ /dev/null @@ -1,26 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2023 Intel Corporation. All rights reserved. 
-// -// Author: Krzysztof Frydryk - -/** - * \file - * \brief Xtensa Asynchronous Messaging Service implementation file - * \authors Krzysztof Frydryk - */ - -#include -#include -#include - -struct async_message_service **arch_ams_get(void) -{ -#if CONFIG_AMS - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - return &ctx->ams; -#else - return NULL; -#endif -} diff --git a/src/arch/xtensa/lib/cpu.c b/src/arch/xtensa/lib/cpu.c deleted file mode 100644 index 7910a02eff83..000000000000 --- a/src/arch/xtensa/lib/cpu.c +++ /dev/null @@ -1,257 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2018 Intel Corporation. All rights reserved. -// -// Author: Tomasz Lauda - -/** - * \file - * \brief Xtensa CPU implementation file - * \authors Tomasz Lauda - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -extern struct core_context *core_ctx_ptr[CONFIG_CORE_COUNT]; -extern struct xtos_core_data *core_data_ptr[CONFIG_CORE_COUNT]; - -static uint32_t active_cores_mask = BIT(PLATFORM_PRIMARY_CORE_ID); - -#if CONFIG_NO_SECONDARY_CORE_ROM -extern void *shared_vecbase_ptr; -extern uint8_t _WindowOverflow4[]; - -/** - * \brief This function will allocate memory for shared secondary cores - * dynamic vectors and set global pointer shared_vecbase_ptr - */ -static void alloc_shared_secondary_cores_objects(void) -{ - uint8_t *dynamic_vectors; - - dynamic_vectors = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, 0, SOF_DYNAMIC_VECTORS_SIZE); - if (dynamic_vectors == NULL) - sof_panic(SOF_IPC_PANIC_MEM); - - shared_vecbase_ptr = dynamic_vectors; - dcache_writeback_region(&shared_vecbase_ptr, - sizeof(shared_vecbase_ptr)); -} - -/** - * \brief This function will copy dynamic vectors from _WindowOverflow4 - * to shared shared_vecbase_ptr used in alternate reset vector - */ -static void unpack_dynamic_vectors(void) -{ - void *dyn_vec_start_addr = _WindowOverflow4; - - memcpy_s(shared_vecbase_ptr, SOF_DYNAMIC_VECTORS_SIZE, - dyn_vec_start_addr, SOF_DYNAMIC_VECTORS_SIZE); - dcache_writeback_invalidate_region(shared_vecbase_ptr, - SOF_DYNAMIC_VECTORS_SIZE); -} -#endif - -int arch_cpu_enable_core(int id) -{ - struct idc_msg power_up = { - IDC_MSG_POWER_UP, IDC_MSG_POWER_UP_EXT, id }; - int ret; - - if (!arch_cpu_is_core_enabled(id)) { - /* Turn on stack memory for core */ - pm_runtime_get(CORE_MEMORY_POW, id); - - /* Power up secondary core */ - pm_runtime_get(PM_RUNTIME_DSP, PWRD_BY_TPLG | id); - - /* allocate resources for core */ - cpu_alloc_core_context(id); - - /* enable IDC interrupt for the secondary core */ - idc_enable_interrupts(id, cpu_get_id()); - -#if CONFIG_NO_SECONDARY_CORE_ROM - /* unpack dynamic vectors if it is the first secondary core */ - if (active_cores_mask == BIT(PLATFORM_PRIMARY_CORE_ID)) { - alloc_shared_secondary_cores_objects(); - unpack_dynamic_vectors(); - } -#endif - /* send IDC power up message */ - ret = idc_send_msg(&power_up, IDC_POWER_UP); - if (ret < 0) - return ret; - - active_cores_mask |= (1 << id); - } - - return 0; -} - -void arch_cpu_disable_core(int id) -{ - struct idc_msg power_down = { - IDC_MSG_POWER_DOWN, IDC_MSG_POWER_DOWN_EXT, id }; - - if (arch_cpu_is_core_enabled(id)) { - idc_send_msg(&power_down, IDC_POWER_DOWN); - - active_cores_mask ^= (1 << id); -#if CONFIG_NO_SECONDARY_CORE_ROM - /* free shared dynamic vectors it was the last secondary core */ - if (active_cores_mask == 
BIT(PLATFORM_PRIMARY_CORE_ID)) { - rfree(shared_vecbase_ptr); - shared_vecbase_ptr = NULL; - } -#endif - } -} - -int arch_cpu_is_core_enabled(int id) -{ - return active_cores_mask & BIT(id); -} - -int arch_cpu_enabled_cores(void) -{ - return active_cores_mask; -} - -void cpu_alloc_core_context(int core) -{ - struct core_context *core_ctx; - - core_ctx = rzalloc_core_sys(core, sizeof(*core_ctx)); - dcache_writeback_invalidate_region(core_ctx, sizeof(*core_ctx)); - - core_data_ptr[core] = rzalloc_core_sys(core, - sizeof(*core_data_ptr[core])); - core_data_ptr[core]->thread_data_ptr = &core_ctx->td; - dcache_writeback_invalidate_region(core_data_ptr[core], - sizeof(*core_data_ptr[core])); - - dcache_writeback_invalidate_region(core_data_ptr, - sizeof(core_data_ptr)); - - core_ctx_ptr[core] = core_ctx; - dcache_writeback_invalidate_region(core_ctx_ptr, - sizeof(core_ctx_ptr)); - - /* share pointer to sof context */ - dcache_writeback_region(sof_get(), sizeof(*sof_get())); -} - -void cpu_power_down_core(uint32_t flags) -{ - arch_interrupt_global_disable(); - - /* Power down with memory on is performed by secondary cores during - * d0 -> d0ix before they are disabled by primary core. - */ - if (flags & CPU_POWER_DOWN_MEMORY_ON) { - /* disable idc interrupts */ - idc_free(IDC_FREE_IRQ_ONLY); - - /* disable scheduler interrupts */ - schedule_free(SOF_SCHEDULER_FREE_IRQ_ONLY); - - /* data writeback/invalidate */ - dcache_writeback_invalidate_all(); - - /* after writeback/invalidate secondary core is prepared for - * powered off - prepare_d0ix_core_mask flag can be disabled - */ - platform_pm_runtime_prepare_d0ix_dis(cpu_get_id()); - } else { - idc_free(0); - - schedule_free(0); - - free_system_notify(); - - /* free entire sys heap, an instance dedicated for this core */ - free_heap(SOF_MEM_ZONE_SYS); - - dcache_writeback_invalidate_all(); - - /* Turn off stack memory for core */ - pm_runtime_put(CORE_MEMORY_POW, cpu_get_id()); - - pm_runtime_put(PM_RUNTIME_DSP, PWRD_BY_TPLG | cpu_get_id()); - } - - trace_point(0); - - /* arch_wait_for_interrupt() not used, because it will cause panic. - * This code is executed on irq lvl > 0, which is expected. - * Core will be put into reset by host anyway. - */ - while (1) - arch_wait_for_interrupt(0); -} - -int arch_cpu_restore_secondary_cores(void) -{ - struct idc_msg power_up = { IDC_MSG_POWER_UP, IDC_MSG_POWER_UP_EXT }; - int ret, id; - - for (id = 0; id < CONFIG_CORE_COUNT; id++) { - if (arch_cpu_is_core_enabled(id) && id != PLATFORM_PRIMARY_CORE_ID) { - power_up.core = id; - - /* Power up secondary core */ - pm_runtime_get(PM_RUNTIME_DSP, id); - - /* enable IDC interrupt for the secondary core */ - idc_enable_interrupts(id, cpu_get_id()); - - /* send IDC power up message */ - ret = idc_send_msg(&power_up, IDC_POWER_UP); - if (ret < 0) - return ret; - } - } - - return 0; -} - -int arch_cpu_secondary_cores_prepare_d0ix(void) -{ - struct idc_msg prepare_msg = { IDC_MSG_PREPARE_D0ix, - IDC_MSG_PREPARE_D0ix_EXT }; - int ret, id; - - for (id = 0; id < CONFIG_CORE_COUNT; id++) { - if (arch_cpu_is_core_enabled(id) && id != PLATFORM_PRIMARY_CORE_ID) { - prepare_msg.core = id; - - /* send IDC prepare message to all enabled secondary - * cores. 
- */ - ret = idc_send_msg(&prepare_msg, IDC_BLOCKING); - if (ret < 0) - return ret; - } - } - - return 0; -} diff --git a/src/arch/xtensa/lib/notifier.c b/src/arch/xtensa/lib/notifier.c deleted file mode 100644 index 225fe1ba99c2..000000000000 --- a/src/arch/xtensa/lib/notifier.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2018 Intel Corporation. All rights reserved. -// -// Author: Tomasz Lauda - -/** - * \file - * \brief Xtensa notifier implementation file - * \authors Tomasz Lauda - */ - -#include -#include -#include - -struct notify **arch_notify_get(void) -{ - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - return &ctx->notify; -} diff --git a/src/arch/xtensa/main-entry.S b/src/arch/xtensa/main-entry.S deleted file mode 100644 index 1cfa36fa4136..000000000000 --- a/src/arch/xtensa/main-entry.S +++ /dev/null @@ -1,34 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2017 Intel Corporation. All rights reserved. - * - * Author: Rander Wang - */ - -/* - * Entry point from boot loader. - * Fix link address of this entry to SOF_TEXT_START so that - * it is easy for boot loader to jump to the baseFW because - * the boot loader and baseFW are in different elf file. - */ - - -// Exports -.global _MainEntry - - -/**************************************************************************/ - - .begin literal_prefix .MainEntry - .section .MainEntry.text, "ax" - - .align 4 - .global _MainEntry - -_MainEntry: - - call0 _start - - .size _MainEntry, . - _MainEntry - - .end literal_prefix diff --git a/src/arch/xtensa/schedule/CMakeLists.txt b/src/arch/xtensa/schedule/CMakeLists.txt deleted file mode 100644 index d462729c7245..000000000000 --- a/src/arch/xtensa/schedule/CMakeLists.txt +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -add_local_sources(sof schedule.c task.c) diff --git a/src/arch/xtensa/schedule/schedule.c b/src/arch/xtensa/schedule/schedule.c deleted file mode 100644 index 8ed9ab811eb9..000000000000 --- a/src/arch/xtensa/schedule/schedule.c +++ /dev/null @@ -1,22 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2018 Intel Corporation. All rights reserved. -// -// Author: Tomasz Lauda - -/** - * \file - * \brief Xtensa schedule implementation file - * \authors Tomasz Lauda - */ - -#include -#include -#include - -struct schedulers **arch_schedulers_get(void) -{ - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - return &ctx->schedulers; -} diff --git a/src/arch/xtensa/schedule/task.c b/src/arch/xtensa/schedule/task.c deleted file mode 100644 index e7949152ff92..000000000000 --- a/src/arch/xtensa/schedule/task.c +++ /dev/null @@ -1,130 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Copyright(c) 2017 Intel Corporation. All rights reserved. 
-// -// Author: Liam Girdwood -// Tomasz Lauda - -/** - * \file - * \brief Arch task implementation file - * \authors Liam Girdwood - * Tomasz Lauda - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -enum task_state task_main_secondary_core(void *data) -{ -#if CONFIG_MULTICORE - /* main audio processing loop */ - while (1) { - /* sleep until next IDC or DMA */ - wait_for_interrupt(0); - } -#endif - - return SOF_TASK_STATE_COMPLETED; -} - -struct task **task_main_get(void) -{ - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - return &ctx->main_task; -} - -volatile void *task_context_get(void) -{ - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - return ctx->td.xtos_active_task; -} - -void task_context_set(void *task_ctx) -{ - struct core_context *ctx = (struct core_context *)cpu_read_threadptr(); - - ctx->td.xtos_active_task = task_ctx; -} - -int task_context_alloc(void **task_ctx) -{ - *task_ctx = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, - sizeof(xtos_task_context)); - if (!*task_ctx) - return -ENOMEM; - return 0; -} - -int task_context_init(void *task_ctx, void *entry, void *arg0, void *arg1, - int task_core, void *stack, int stack_size) -{ - xtos_task_context *ctx = task_ctx; - UserFrame *sp; - - /* allocate stack if not provided */ - if (stack) { - ctx->stack_base = stack; - ctx->stack_size = stack_size; - } else { - ctx->stack_base = rballoc(0, SOF_MEM_CAPS_RAM, - PLATFORM_TASK_DEFAULT_STACK_SIZE); - if (!ctx->stack_base) - return -ENOMEM; - ctx->stack_size = PLATFORM_TASK_DEFAULT_STACK_SIZE; - ctx->flags |= XTOS_TASK_CONTEXT_OWN_STACK; - } - bzero(ctx->stack_base, ctx->stack_size); - - /* set initial stack pointer */ - sp = (UserFrame *)((char *)ctx->stack_base + ctx->stack_size - - sizeof(UserFrame)); - - /* entry point */ - sp->pc = (uint32_t)entry; - - /* a1 is pointer to stack */ - sp->a1 = (uint32_t)sp; - - /* PS_WOECALL4_ABI - window overflow and increment enable - * PS_UM - user vector mode enable - */ - sp->ps = PS_WOECALL4_ABI | PS_UM; - - /* a6 and a7 are the first parameters */ - sp->a6 = (uint32_t)arg0; - sp->a7 = (uint32_t)arg1; - - ctx->stack_pointer = sp; - - return 0; -} - -void task_context_free(void *task_ctx) -{ - xtos_task_context *ctx = task_ctx; - - if (ctx->flags & XTOS_TASK_CONTEXT_OWN_STACK) - rfree(ctx->stack_base); - - ctx->stack_size = 0; - ctx->stack_pointer = NULL; - - rfree(ctx); -} diff --git a/src/arch/xtensa/xtos/CMakeLists.txt b/src/arch/xtensa/xtos/CMakeLists.txt deleted file mode 100644 index 2377a7469428..000000000000 --- a/src/arch/xtensa/xtos/CMakeLists.txt +++ /dev/null @@ -1,96 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause - -set(VECTOR_DEFS - -D__SPLIT__vector - -D__SPLIT__handler - -D__SPLIT__user -) - -# Builds lib for each level from the same source files - -function(sof_xtos_add_level level) - set(lib_name "xlevel${level}") - add_library(${lib_name} STATIC "") - add_local_sources(${lib_name} - int-handler.S - int-vector.S - int-initlevel.S - ) - target_link_libraries(${lib_name} sof_options) - target_compile_definitions(${lib_name} PRIVATE - ${VECTOR_DEFS} - -D_INTERRUPT_LEVEL=${level} - ) -endfunction() - -if(CONFIG_XT_INTERRUPT_LEVEL_1) - list(APPEND VECTOR_DEFS -D__SPLIT__level1int) -endif() - -if(CONFIG_XT_INTERRUPT_LEVEL_2) - list(APPEND VECTOR_DEFS -D__SPLIT__level2) - list(APPEND levels 2) -endif() - -if(CONFIG_XT_INTERRUPT_LEVEL_3) - list(APPEND 
VECTOR_DEFS -D__SPLIT__level3) - list(APPEND levels 3) -endif() - -if(CONFIG_XT_INTERRUPT_LEVEL_4) - list(APPEND VECTOR_DEFS -D__SPLIT__level4) - list(APPEND levels 4) -endif() - -if(CONFIG_XT_INTERRUPT_LEVEL_5) - list(APPEND VECTOR_DEFS -D__SPLIT__level5) - list(APPEND levels 5) -endif() - -foreach(level ${levels}) - sof_xtos_add_level(${level}) -endforeach() - -add_library(xtos STATIC "") -add_local_sources(xtos - core-restore.S - core-save.S - core-shutoff.S - double-vector.S - debug-vector.S - xea1/exc-alloca-handler.S - xea1/exc-c-wrapper-handler.S - xea2/exc-c-wrapper-handler.S - xea1/exc-return.S - xea2/exc-return.S - exc-sethandler.c - exc-syscall-handler.S - exc-table.S - exc-unhandled.S - interrupt-table.S - int-sethandler.c - xea1/intlevel-restore.S - xea2/intlevel-restore.S - xea1/intlevel-setmin.S - xea2/intlevel-setmin.S - xea1/intlevel-set.S - xea2/intlevel-set.S - xea1/int-lowpri-dispatcher.S - xea2/int-lowpri-dispatcher.S - ints-off.S - ints-on.S - kernel-vector.S - memep-enable.S - memep-initrams.S - memerror-vector.S - nmi-vector.S - xea2/reloc-vectors.S - user-vector.S - xea1/window-vectors.S - xea2/window-vectors.S - stub.c -) -sof_append_relative_path_definitions(xtos) -target_link_libraries(xtos sof_options) -target_compile_definitions(xtos PRIVATE ${VECTOR_DEFS}) - diff --git a/src/arch/xtensa/xtos/_sharedvectors-for-reset.S b/src/arch/xtensa/xtos/_sharedvectors-for-reset.S deleted file mode 100644 index ef60901d53e8..000000000000 --- a/src/arch/xtensa/xtos/_sharedvectors-for-reset.S +++ /dev/null @@ -1,40 +0,0 @@ -// _sharedvectors-for-reset.S -- Reference to pull in a shared reset vector -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/_sharedvectors-for-reset.S#1 $ - -// Copyright (c) 2008 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -/* Multicore build flows can use this file (_sharedvectors-for-reset.o) - by copying it to _sharedvectors.o early in the linker search path - (to override the default one), and updating the memory map or linker - scripts accordingly. - This file pulls in a sharable reset vector (typically - shared-reset-vector.S, which requires the PRID option). 
*/ - -.global _SharedResetVector - -/* The following allows this object file to be pulled in by reference: */ -.text -.global _xtos_sharedvectors_ref_ -.set _xtos_sharedvectors_ref_, 0 - diff --git a/src/arch/xtensa/xtos/_sharedvectors.S b/src/arch/xtensa/xtos/_sharedvectors.S deleted file mode 100644 index def144132519..000000000000 --- a/src/arch/xtensa/xtos/_sharedvectors.S +++ /dev/null @@ -1,37 +0,0 @@ -// _sharedvectors.S -- Reference symbols to pull in any shared vectors -// (default version, used when not sharing any vector) -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/_sharedvectors.S#1 $ - -// Copyright (c) 2008 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -/* Not present by default. Multicore build flows build/use a custom - version of _sharedvectors.o that may pull in shared vectors. */ - -/* .global _SharedResetVector */ - -/* The following allows this object file to be pulled in by reference: */ -.text -.global _xtos_sharedvectors_ref_ -.set _xtos_sharedvectors_ref_, 0 - diff --git a/src/arch/xtensa/xtos/_vectors.S b/src/arch/xtensa/xtos/_vectors.S deleted file mode 100644 index 8c70e3f2514f..000000000000 --- a/src/arch/xtensa/xtos/_vectors.S +++ /dev/null @@ -1,94 +0,0 @@ -// _vectors.S -- Reference symbols to pull in all required vectors -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/_vectors.S#1 $ - -// Copyright (c) 2004, 2006-2007 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include /* for XSHAL_VECTORS_PACKED */ - -.global _ResetVector - -#if XCHAL_HAVE_EXCEPTIONS - -# if XCHAL_HAVE_DEBUG -.global _DebugExceptionVector -# endif - -.global _KernelExceptionVector -.global _UserExceptionVector - -# ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR -.global _DoubleExceptionVector -# endif - -# if XCHAL_HAVE_NMI -.global _NMIExceptionVector -# endif - -# if XCHAL_HAVE_WINDOWED -.global _WindowOverflow4 -.global _WindowUnderflow4 -.global _WindowOverflow8 -.global _WindowUnderflow8 -.global _WindowOverflow12 -.global _WindowUnderflow12 -# endif - -# if XCHAL_HAVE_MEM_ECC_PARITY -.global _MemErrorVector -# endif - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - - -#if (XCHAL_NUM_INTLEVELS >= 2) && (2 != XCHAL_DEBUGLEVEL) -.global _Level2Vector -#endif -#if (XCHAL_NUM_INTLEVELS >= 3) && (3 != XCHAL_DEBUGLEVEL) -.global _Level3Vector -#endif -#if (XCHAL_NUM_INTLEVELS >= 4) && (4 != XCHAL_DEBUGLEVEL) -.global _Level4Vector -#endif -#if (XCHAL_NUM_INTLEVELS >= 5) && (5 != XCHAL_DEBUGLEVEL) -.global _Level5Vector -#endif -#if (XCHAL_NUM_INTLEVELS >= 6) && (6 != XCHAL_DEBUGLEVEL) -.global _Level6Vector -#endif - -#if XCHAL_HAVE_VECBASE && XSHAL_VECTORS_PACKED -.global _RelocVectors -#endif - -/* These don't take up space: */ -.global xthals_hw_configid0 -.global xthals_hw_configid1 -.global xthals_release_major -.global xthals_release_minor - -/* The following allows this object file to be pulled in by reference: */ -.text -.global _xtos_vectors_ref_ -.set _xtos_vectors_ref_, 0 - - diff --git a/src/arch/xtensa/xtos/checkvecsize b/src/arch/xtensa/xtos/checkvecsize deleted file mode 100755 index 70789816de81..000000000000 --- a/src/arch/xtensa/xtos/checkvecsize +++ /dev/null @@ -1,71 +0,0 @@ -# Script to check that vector code is 16 bytes or less -# $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/checkvecsize#1 $ - -# Copyright (c) 2001 Tensilica Inc. -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package Xtensa::checkvecsize; - -# Perl library modules -use strict; -use Getopt::Long; -use FileHandle; - -# Program - -use vars qw($objdump $maxsize); - -{ - $::myname = 'checkvecsize'; - - # command line - $maxsize = 16; - die("Usage is: $::myname -objdump prog [-maxsize n] files...\n") - unless &GetOptions("objdump=s" => \$objdump, - "maxsize=i" => \$maxsize) - && @ARGV > 0 && defined($objdump); - my $file; - foreach $file (@ARGV) { - checkvecsize ($file); - } -} - -sub checkvecsize { - my ($file) = @_; - my $od = new FileHandle "${objdump} -h $file|"; - die("$::myname: $!, opening pipe to $objdump -h $file.\n") - unless $od; - while (<$od>) { - if (/^\s*\d+\s+(\S+)\s+([0-9A-Fa-f]{8})\s/) { - my $size = hex($2); - die("$::myname: $file $1 section size is $size bytes.\n") - if $size > $maxsize; - } - } - $od->close(); -} - - -# Local Variables: -# mode:perl -# perl-indent-level:2 -# cperl-indent-level:2 -# End: diff --git a/src/arch/xtensa/xtos/core-restore.S b/src/arch/xtensa/xtos/core-restore.S deleted file mode 100644 index 25fa43f18afb..000000000000 --- a/src/arch/xtensa/xtos/core-restore.S +++ /dev/null @@ -1,607 +0,0 @@ -// core-restore.S -- core state restore routine (used by PSO) -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/core-restore.S#1 $ - -// Copyright (c) 2012-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include -#include -#include -#include -#include -#include -#include -#include "xtos-internal.h" -#include - - .text - - - - // void _xtos_core_restore(unsigned retvalue, XtosCoreState *savearea) - // - // Restore processor state. - // If save area signature is bad, function just returns. - // Else, processor state is restored, and execution resumes - // according to that saved processor state. - // - // On entry: - // Caches are ready to use (initialized or warm, as the case may be). - // - .align 4 - .global _xtos_core_restore - .type _xtos_core_restore,@function -_xtos_core_restore: - abi_entry - - // Check the save area's signature: - movi a5, CORE_STATE_SIGNATURE - l32i a4, a3, CS_SA_signature - movi a6, 0 - bne a4, a5, 1f // if bad, just return - -#if XCHAL_HAVE_INTERRUPTS - rsil a4, 15 // disable interrupts... - wsr.intenable a6 -#endif - - // Here, call0 is used as an unlimited range jump. It does not return. - call0 _xtos_core_restore_nw - -1: abi_return - - .size _xtos_core_restore, . - _xtos_core_restore - - - - // Restore processor state. 
- // On entry: - // Caches are ready to use (initialized or warm, as the case may be). - // a2 = return value passed to restored processor state - // a3 = pointer to save area to restore from - // INTENABLE = 0 (interrupts all disabled) - // LITBASE = initialized (per reset vector, not restored) - // touching a4..a7 won't overflow - // other registers are mostly undefined - // - .align 4 - .global _xtos_core_restore_nw - .type _xtos_core_restore_nw,@function -_xtos_core_restore_nw: - -#if XCHAL_HAVE_WINDOWED - s32i a2, a3, CS_SA_areg + 2*4 // save a2 thru rotation - wsr.excsave1 a3 // save a3 thru rotation - l32i a6, a3, CS_SA_windowstart // restore windowstart - l32i a5, a3, CS_SA_windowbase // restore windowbase - wsr.windowstart a6 - wsr.windowbase a5 - rsync - // a0-a15 have possibly all changed, so need to reload a3 - rsr.excsave1 a3 // restore a3 - l32i a2, a3, CS_SA_areg + 2*4 // restore a2 (return value) -#endif - - //movi a0, 0 - l32i a0, a3, CS_SA_restore_label // _xtos_core_save_common's return PC - - // Just for consistency... -#if XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_EXCEPTIONS - movi a4, 0x11 - wsr.ps a4 - rsync -#endif - - l32i a5, a3, CS_SA_sar // restore sar - wsr.sar a5 - -#if XCHAL_HAVE_PSO_CDM - // Restore PWRCTL (except ShutProcOffOnPWait, cleared when all is done). - movi a4, XDM_MISC_PWRCTL - movi a7, PWRCTL_CORE_SHUTOFF // aka ShutProcOffOnPWait - rer a6, a4 // read pwrctl - l32i a5, a3, CS_SA_pwrctl // get saved pwrctl - and a7, a7, a6 // keep just ShutProcOffOnPWait bit - or a5, a5, a7 // keep it set if already set (clear later) - wer a5, a4 // restore pwrctl (except ShutProcOffOnPWait) -#endif - - .set _idx, 2 - .rept XCHAL_NUM_INTLEVELS+XCHAL_HAVE_NMI-1 - l32i a5, a3, CS_SA_epc + 4*(_idx-2) - INDEX_SR wsr.epc a5 - l32i a5, a3, CS_SA_eps + 4*(_idx-2) - INDEX_SR wsr.eps a5 - l32i a5, a3, CS_SA_excsave + 4*(_idx-2) - INDEX_SR wsr.excsave a5 - .set _idx, _idx+1 - .endr - -#if XCHAL_HAVE_LOOPS - l32i a5, a3, CS_SA_lbeg - wsr.lbeg a5 - l32i a5, a3, CS_SA_lend - wsr.lend a5 - l32i a5, a3, CS_SA_lcount - wsr.lcount a5 -#endif -#if XCHAL_HAVE_ABSOLUTE_LITERALS - l32i a5, a3, CS_SA_litbase - wsr.litbase a5 -#endif -#if XCHAL_HAVE_VECBASE - l32i a5, a3, CS_SA_vecbase - wsr.vecbase a5 -#endif -#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? */ - l32i a5, a3, CS_SA_atomctl - wsr.atomctl a5 -#endif -#if XCHAL_HAVE_PREFETCH - l32i a5, a3, CS_SA_prefctl - wsr.prefctl a5 -#endif -#if XCHAL_USE_MEMCTL - l32i a5, a3, CS_SA_memctl - wsr.memctl a5 -#endif -#if XCHAL_HAVE_DEBUG - // NOTE: restore of debug state is conditional, - // as the power-down and wakeup code might be actively debugged. - rsr.icountlevel a5 - bnez a5, 1f // skip if being single-stepped (not failsafe!) - l32i a5, a3, CS_SA_icount - wsr.icount a5 - l32i a5, a3, CS_SA_icountlevel - wsr.icountlevel a5 -1: - //l32i a5, a3, CS_SA_debugcause // (won't get restored?) 
- //wsr.debugcause a5 - //l32i a5, a3, CS_SA_ddr - //wsr.ddr a5 -# if XCHAL_NUM_IBREAK - rsr.ibreakenable a6 - bnez a5, 1f // skip restore if already some ibreaks defined - - .set _idx, 0 - .rept XCHAL_NUM_IBREAK - l32i a5, a3, CS_SA_ibreaka + 4*_idx - INDEX_SR wsr.ibreaka a5 - .set _idx, _idx+1 - .endr - - l32i a5, a3, CS_SA_ibreakenable - wsr.ibreakenable a5 -1: -# endif - .set _idx, 0 - .rept XCHAL_NUM_DBREAK - INDEX_SR rsr.dbreakc a6 - bbsi.l a6, 30, 1f // skip restore of that dbreak if already active - bbsi.l a6, 31, 1f // ditto - l32i a5, a3, CS_SA_dbreaka + 4*_idx - INDEX_SR wsr.dbreaka a5 - l32i a5, a3, CS_SA_dbreakc + 4*_idx - INDEX_SR wsr.dbreakc a5 -1: - .set _idx, _idx+1 - .endr -#endif - - .set _idx, 0 - .rept XCHAL_NUM_MISC_REGS - l32i a5, a3, CS_SA_misc + 4*_idx - INDEX_SR wsr.misc a5 - .set _idx, _idx+1 - .endr - -#if XCHAL_HAVE_MEM_ECC_PARITY - l32i a5, a3, CS_SA_mepc - wsr.mepc a5 - l32i a5, a3, CS_SA_meps - wsr.meps a5 - l32i a5, a3, CS_SA_mesave - wsr.mesave a5 - l32i a5, a3, CS_SA_mesr - wsr.mesr a5 - l32i a5, a3, CS_SA_mecr - wsr.mecr a5 - l32i a5, a3, CS_SA_mevaddr - wsr.mevaddr a5 -#endif - - /* TIE state */ - addi a4, a3, CS_SA_ncp - xchal_ncp_load a4, a5,a6,a7,a8 // restore non-coprocessor state -#if XCHAL_HAVE_CP - movi a6, -1 - wsr.cpenable a6 // enable all coprocessors - rsync - xchal_cp0_load a4, a5,a6,a7,a8 continue=1 - xchal_cp1_load a4, a5,a6,a7,a8 continue=1 - xchal_cp2_load a4, a5,a6,a7,a8 continue=1 - xchal_cp3_load a4, a5,a6,a7,a8 continue=1 - xchal_cp4_load a4, a5,a6,a7,a8 continue=1 - xchal_cp5_load a4, a5,a6,a7,a8 continue=1 - xchal_cp6_load a4, a5,a6,a7,a8 continue=1 - xchal_cp7_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp8_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp9_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp10_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp11_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp12_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp13_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp14_load a4, a5,a6,a7,a8 continue=1 - //xchal_cp15_load a4, a5,a6,a7,a8 continue=1 - l32i a5, a3, CS_SA_cpenable - wsr.cpenable a5 -#endif - - /* TLB state (for known MMU types only, not internal custom) */ - // FIXME FIXME FIXME TODO: - // This restore code does not work in the general case, - // for CaXLT or full MMU, in particular when any address mappings - // were active when saved, that don't match reset state and affect - // code and data currently being accessed for restore. -#if XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR - addi a4, a3, CS_SA_tlbs // where to start loading TLB entry info - movi a5, 0x20000000 - movi a6, 0 -1: - l32i a7, a4, 0 - wdtlb a7, a6 // write DTLB entry PPN + CA - dsync - l32i a7, a4, 4 - j 2f - // Ensure WITLB and ISYNC are in same cache line, when writing ITLB - // entry that maps this currently running code - // (micro-architecture defined sequence): - .begin no-transform - .align 16 -2: witlb a7, a6 // write ITLB entry PPN + CA - isync - .end no-transform - nop - nop - addi a4, a4, 8 - add a6, a6, a5 - bnez a6, 1b - -#elif XCHAL_HAVE_PTP_MMU - addi a4, a3, CS_SA_tlbs // where to start storing TLB entry info - movi a10, _xtos_pso_tlbmap - movi a11, _xtos_pso_tlbmap_end - l32i a14, a3, CS_SA_dtlbcfg - l32i a15, a3, CS_SA_itlbcfg - wsr.dtlbcfg a14 // page size index (0..3) for each DTLB way - wsr.itlbcfg a15 // page size index (0..3) for each ITLB way - l32i a5, a3, CS_SA_ptevaddr - wsr.ptevaddr a5 - rsync - // Loop from last way to first (less register pressure that way). 
-.Loop_tlbmap_rest: - addi a11, a11, -8 // next way - l32i a8, a11, 0 // map of four (page size log2) per index for this way - // DTLB page size: - extui a12, a14, 0, 4 // page size index for this DTLB way - srli a14, a14, 4 // (for next way) - ssa8l a12 // prepare to shift right by 8*a12 - srl a12, a8 // page size log2 for this DTLB way - ssl a12 // prepare to shift left by a12 - movi a12, 1 // (to compute 1 << (page size log2)) - sll a12, a12 // page size for this DTLB way - - // Restore all entries of this DTLB way: - l32i a9, a11, 4 // number of entries for this way - sub a5, a11, a10 // way number * 8 - srli a5, a5, 3 // way number - extui a9, a9, 0, 8 -1: - l32i a6, a4, 0 // read entry VPN + ASID - extui a7, a6, 0, 8 // get ASID - bnez a7, 2f // if non-zero, need WDTLB - add a6, a6, a5 // zero, so need IDTLB - add way number - idtlb a6 // invalidate DTLB entry - j 5f -2: // Non-zero ASID. Put in RASID and adjust PS.RING accordingly. - bgeui a7, 5, 3f // branch if ASID >= 5 - addi a7, a7, -1 - slli a7, a7, 6 // PS.RING = ASID - 1 - addi a7, a7, 0x11 // PS.EXCM=1, PS.INTLEVEL=1 - movi a6, 0x04030201 // for ASID in {1 .. 4} - j 4f -3: // ASID >= 5, place it in RASID - movi a6, 0x00030201 - slli a7, a7, 24 - add a6, a7, a6 // RASID = 0x 03 02 01 - movi a7, 0xd1 // PS.RING=3, PS.EXCM=1, PS.INTLEVEL=1 -4: wsr.rasid a6 - wsr.ps a7 - rsync - l32i a6, a4, 0 // read entry VPN + ASID - l32i a7, a4, 4 // read entry PPN + CA - srli a6, a6, 8 // replace ASID ... - slli a6, a6, 8 // ... - add a6, a6, a5 // ... with way number - wdtlb a7, a6 // write DTLB entry ... -5: dsync - addi a4, a4, 8 - add a5, a5, a12 // next entry of this DTLB way - addi a9, a9, -1 - bnez a9, 1b - - // ITLB page size: - extui a12, a15, 0, 4 // page size index for this ITLB way - srli a15, a15, 4 // (for next way) - ssa8l a12 // prepare to shift right by 8*a12 - srl a12, a8 // page size log2 for this ITLB way - ssl a12 // prepare to shift left by a12 - movi a12, 1 // (to compute 1 << (page size log2)) - sll a12, a12 // page size for this ITLB way - - // Restore all entries of this ITLB way: - l32i a9, a11, 4 // number of entries for this way - sub a5, a11, a10 // way number * 8 - srli a5, a5, 3 // way number - bbsi.l a9, 15, 6f // skip ITLB if is a DTLB-only way - extui a9, a9, 0, 8 -1: - l32i a6, a4, 0 // read entry VPN + ASID - extui a7, a6, 0, 8 // get ASID - bnez a7, 2f // if non-zero, need WITLB - add a6, a6, a5 // zero, so need IITLB - add way number - iitlb a6 // invalidate ITLB entry - j 5f -2: // Non-zero ASID. Put in RASID and adjust PS.RING accordingly. - bgeui a7, 5, 3f // branch if ASID >= 5 - addi a7, a7, -1 - slli a7, a7, 6 // PS.RING = ASID - 1 - addi a7, a7, 0x11 // PS.EXCM=1, PS.INTLEVEL=1 - movi a6, 0x04030201 // for ASID in {1 .. 4} - j 4f -3: // ASID >= 5, place it in RASID - movi a6, 0x00030201 - slli a7, a7, 24 - add a6, a7, a6 // RASID = 0x 03 02 01 - movi a7, 0xd1 // PS.RING=3, PS.EXCM=1, PS.INTLEVEL=1 -4: wsr.rasid a6 - wsr.ps a7 - rsync - l32i a6, a4, 0 // read entry VPN + ASID - l32i a7, a4, 4 // read entry PPN + CA - srli a6, a6, 8 // replace ASID ... - slli a6, a6, 8 // ... - add a6, a6, a5 // ... with way number - j 8f - .align 16 // ensure WITLB and ISYNC in same cache line -8: witlb a7, a6 // write ITLB entry ... -5: isync - addi a4, a4, 8 - add a5, a5, a12 // next entry of this ITLB way - addi a9, a9, -1 - bnez a9, 1b -6: - - bne a11, a10, .Loop_tlbmap_rest // loop for next TLB way - l32i a5, a3, CS_SA_rasid - wsr.rasid a5 - movi a6, 0x11 - wsr.ps a6 - rsync - // Done saving TLBs. 
-#endif - -#if XCHAL_HAVE_MPU - addi a4, a3, CS_SA_mpuentry // MPU restore location - movi a5, XCHAL_MPU_ENTRIES - mpu_write_map a4, a5, a6, a7, a8, a9 - l32i a4, a3, CS_SA_cacheadrdis - wsr.cacheadrdis a4 -#endif - -#if XCHAL_HAVE_IDMA - addi a4, a3, CS_SA_idmaregs // IDMA regs restore location - _idma_restore a4, a5, a6, a7 -#endif - -#if XCHAL_HAVE_WINDOWED - // All the stack frames (except for our own) are supposed to be spilled - // into the stack. So now we restore the saved registers for our caller - // (and its caller) into the correct locations in the stack. See the - // comments in core-save.S and also the Xtensa Programmers Guide for - // more information. Of course we only restore if there is valid saved - // state. - - l32i a4, a3, CS_SA_caller_regs_saved // flag - beqz a4, .Lendcr // skip restore if 0 - - // Restore our caller's a0-a3 - - l32i a1, a3, CS_SA_areg + 1*4 // restore a1 - addi a4, a1, -16 - l32i a5, a3, CS_SA_caller_regs - l32i a6, a3, CS_SA_caller_regs + 4 - s32i a5, a4, 0 // caller a0 - s32i a6, a4, 4 // caller a1 - l32i a5, a3, CS_SA_caller_regs + 8 - l32i a6, a3, CS_SA_caller_regs + 12 - s32i a5, a4, 8 // caller a2 - s32i a6, a4, 12 // caller a3 - - // Now restore our callers caller's a0-a3 - - l32i a5, a3, CS_SA_caller_regs + 16 - l32i a6, a3, CS_SA_caller_regs + 20 - s32i a5, a1, 0 // caller caller a0 - s32i a6, a1, 4 // caller caller a1 - l32i a5, a3, CS_SA_caller_regs + 24 - l32i a6, a3, CS_SA_caller_regs + 28 - s32i a5, a1, 8 // caller caller a2 - s32i a6, a1, 12 // caller caller a3 - - // Now restore caller's a4-a11 as required - // NOTE a0 is pointing to _xtos_core_save() not the actual caller - - l32i a4, a3, CS_SA_areg // load actual return address - extui a4, a4, 30, 2 // top 2 bits of ret addr - blti a4, 2, .Lendcr - l32i a5, a1, 4 // a5 <- caller caller a1 - slli a4, a4, 4 - sub a4, a5, a4 // a4 <- bottom of extra save area - addi a5, a5, -16 // a5 <- top of extra save area - addi a6, a3, CS_SA_caller_regs + 32 // location to start restore from -.Lcrloop: - l32i a7, a6, 0 // Restore in groups of 4 registers - l32i a8, a6, 4 - s32i a7, a4, 0 - s32i a8, a4, 4 - l32i a7, a6, 8 - l32i a8, a6, 12 - s32i a7, a4, 8 - s32i a8, a4, 12 - addi a4, a4, 16 - addi a6, a6, 16 - blt a4, a5, .Lcrloop -.Lendcr: -#endif - - // Restore timers and CCOUNT right before enabling interrupts. We will - // try to restore any timer interrupts that were pending (as indicated - // by the INTERRUPT register) at the time of the state save. -#if XCHAL_HAVE_CCOUNT - .macro restore_timer num intr - l32i a5, a3, CS_SA_ccompare + 4*\num // Load CCOMPARE value - l32i a6, a3, CS_SA_interrupt // Load old INTERRUPT value - writesr ccompare \num a5 // Restore CCOMPARE - bbci.l a6, \intr, .Lrtdone\num // Intr not set for this timer - addi a5, a5, -1 // CCOUNT = CCOMPARE - 1 -.Lrttry\num: - wsr.ccount a5 // Set CCOUNT and wait - esync - nop - rsr.interrupt a6 - bbci.l a6, \intr, .Lrttry\num // If intr not set then retry -.Lrtdone\num: - .endm - -#if XCHAL_NUM_TIMERS > 0 - restore_timer 0 XCHAL_TIMER0_INTERRUPT -#endif -#if XCHAL_NUM_TIMERS > 1 - restore_timer 1 XCHAL_TIMER1_INTERRUPT -#endif -#if XCHAL_NUM_TIMERS > 2 - restore_timer 2 XCHAL_TIMER2_INTERRUPT -#endif -#if XCHAL_NUM_TIMERS > 3 - restore_timer 3 XCHAL_TIMER3_INTERRUPT -#endif - - // Attempt to clear any spurious timer interrupts caused by the CCOUNT - // dance above. 
-#if XCHAL_NUM_TIMERS > 0 - l32i a5, a3, CS_SA_ccount // Restore CCOUNT - wsr.ccount a5 - l32i a5, a3, CS_SA_interrupt // Load old intr value - bbsi.l a5, XCHAL_TIMER0_INTERRUPT, .Lx1 // Skip if timer0 intr set - rsr.ccompare0 a6 // Force timer0 intr clear - wsr.ccompare0 a6 -.Lx1: -#if XCHAL_NUM_TIMERS > 1 - bbsi.l a5, XCHAL_TIMER1_INTERRUPT, .Lx2 // Skip if timer1 intr set - rsr.ccompare1 a6 // Force timer1 intr clear - wsr.ccompare1 a6 -.Lx2: -#endif -#if XCHAL_NUM_TIMERS > 2 - bbsi.l a5, XCHAL_TIMER2_INTERRUPT, .Lx3 // Skip if timer2 intr set - rsr.ccompare2 a6 // Force timer2 intr clear - wsr.ccompare2 a6 -.Lx3: -#endif -#if XCHAL_NUM_TIMERS > 3 - bbsi.l a5, XCHAL_TIMER3_INTERRUPT, .Lx4 // Skip if timer3 intr set - rsr.ccompare3 a6 // Force timer3 intr clear - wsr.ccompare3 a6 -.Lx4: -#endif -#endif - - l32i a5, a3, CS_SA_ccount // Restore CCOUNT again - wsr.ccount a5 -#endif - -#if XCHAL_HAVE_INTERRUPTS - rsil a6, 15 // disable interrupts before enabling with INTENABLE - l32i a5, a3, CS_SA_intenable - wsr.intenable a5 - movi a4, XCHAL_INTTYPE_MASK_SOFTWARE // restore any pending software interrupts - l32i a5, a3, CS_SA_interrupt - and a5, a5, a4 - wsr.intset a5 - rsync -#endif - - //l32i a0, a3, CS_SA_restore_label // _xtos_core_save_common's return PC -#if XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_EXCEPTIONS - //l32i a4, a3, CS_SA_ps - l32i a5, a3, CS_SA_epc1 - wsr.epc1 a5 - l32i a5, a3, CS_SA_excsave1 - wsr.excsave1 a5 -# ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR - l32i a5, a3, CS_SA_depc - wsr.depc a5 -# endif - //wsr.ps a4 // PS restored by caller - //rsync -#endif - -#if XCHAL_HAVE_PSO_CDM - // As late as possible, wait for debug to wakeup, and clear PWRCTL.ShutProcOffOnPWait. - movi a4, XDM_MISC_PWRCTL - rer a5, a4 // read pwrctl - - // Wait for debug powerup to complete (if started): - bbci.l a5, PWRCTL_DEBUG_WAKEUP_SHIFT, 1f - movi a7, XDM_MISC_PWRSTAT -2: rer a6, a7 // read PWRSTAT - bbci.l a6, PWRSTAT_DEBUG_DOMAIN_ON_SHIFT, 2b // loop until debug is powered up -1: - - movi a7, ~PWRCTL_CORE_SHUTOFF // aka ShutProcOffOnPWait - and a5, a5, a7 // clear ShutProcOffOnPWait bit - wer a5, a4 // update pwrctl -#endif - - movi a4, 0 - s32i a4, a3, CS_SA_signature // make sure save area is marked as no longer valid -#if XCHAL_DCACHE_IS_WRITEBACK - dhwb a3, CS_SA_signature -#endif - ret // return from _xtos_core_save_common - // NOTE: a2 holds return value as specified to - // _xtos_core_restore() - - .size _xtos_core_restore_nw, . - _xtos_core_restore_nw - diff --git a/src/arch/xtensa/xtos/core-save.S b/src/arch/xtensa/xtos/core-save.S deleted file mode 100644 index 217d68c845d4..000000000000 --- a/src/arch/xtensa/xtos/core-save.S +++ /dev/null @@ -1,763 +0,0 @@ -// core-save.S -- core state save/restore routines (used by PSO) -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/core-save.S#1 $ - -// Copyright (c) 2012-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include -#include -#include -#include -#include -#include -#include "xtos-internal.h" - -.weak _idma_pso_save -//.type xtos_C_core_save, @function - - - .text - - - // (Place this alternate entry symbol *outside* the _xtos_core_save() - // function, to avoid confusing debugging / profiling / etc.) - .align 4 - .global _xtos_core_save_entry - .type _xtos_core_save_entry,@function -_xtos_core_save_entry: - j .Lcore_save - .size _xtos_core_save_entry, . - _xtos_core_save_entry - - - // int _xtos_core_save(unsigned flags, XtosCoreState *savearea, void *code) - // - // Generic processor state save routine. - // - // On entry (after ENTRY if windowed): - // a0 = return PC - // a2 = flags argument - // a3 = ptr to save area - // a4 = ptr to code to jump to after save (just return if 0) - // Returns: - // 0 when first returning from this call (if a4 == 0) - // non-zero (passed from restore call) when returning from restore - // (if a4 != 0, return behavior if any depends on code at a4) - // - .align 4 - .global _xtos_core_save - .type _xtos_core_save,@function -_xtos_core_save: - abi_entry - -.Lcore_save: - s32i a0, a3, CS_SA_areg + 0*4 // save a0 (clobbered below) - s32i a1, a3, CS_SA_areg + 1*4 // save a1 - s32i a2, a3, CS_SA_areg + 2*4 // save a2 (flags arg, for debugging only) - s32i a4, a3, CS_SA_areg + 4*4 // save a4 (code to jump to after saving) -#ifdef __XTENSA_CALL0_ABI__ - // Callee-saved regs: - s32i a12, a3, CS_SA_areg + 12*4 // save a12 - s32i a13, a3, CS_SA_areg + 13*4 // save a13 - s32i a14, a3, CS_SA_areg + 14*4 // save a14 - s32i a15, a3, CS_SA_areg + 15*4 // save a15 -#else - call4 xthal_window_spill // spill live caller windows to stack -#endif - j .Ls1 - - .align 16 -.Ls1: -#if XCHAL_HAVE_INTERRUPTS - rsil a4, 15 // disable interrupts before clobbering a0 -#elif XCHAL_HAVE_EXCEPTIONS - rsr.ps a4 -#endif - -#if XCHAL_HAVE_EXCEPTIONS - s32i a4, a3, CS_SA_ps // save PS -#endif - -#if XCHAL_HAVE_IDMA - movi a4, _idma_pso_save - beqz a4, .LnoIDMA -# ifdef __XTENSA_CALL0_ABI__ - mov a13, a3 - mov a12, a2 - addi a3, a3, CS_SA_idmaregs // location for IDMA regs save - call0 _idma_pso_save - mov a3, a13 - mov a2, a12 -# else - mov a6, a2 - addi a7, a3, CS_SA_idmaregs // location for IDMA regs save - call4 _idma_pso_save -# endif -.LnoIDMA: -#endif - -// not yet implemented -//# ifdef __XTENSA_CALL0_ABI__ -// mov a13, a3 -// mov a12, a2 -// call0 xtos_C_core_save -// mov a3, a13 -// mov a2, a12 -//# else -// mov a6, a2 -// mov a7, a3 -// call4 xtos_C_core_save -//# endif -//#endif - -#if XCHAL_HAVE_CCOUNT - rsr.ccount a5 // save CCOUNT restore value -#endif -#if XCHAL_HAVE_INTERRUPTS - rsr.interrupt a6 // save pending interrupts - s32i a6, a3, CS_SA_interrupt -#endif -#if XCHAL_HAVE_CCOUNT - s32i a5, a3, CS_SA_ccount -#endif - - call0 _xtos_core_save_common // save and shutoff -- returns after wakeup - - // a2 now contains return value. - // a3 still points to save area. - // Interrupts still disabled. - - // Restore WINDOWSTART to single window. Leave WINDOWBASE wherever it is. 
- //rsr.windowbase a6 - //movi a5, 1 - //ssl a6 - //sll a5, a5 - //wsr.windowstart a5 - //rsync - - l32i a0, a3, CS_SA_areg + 0*4 // restore a0 - l32i a1, a3, CS_SA_areg + 1*4 // restore a1 -#ifdef __XTENSA_CALL0_ABI__ - // Callee-saved regs: - l32i a12, a3, CS_SA_areg + 12*4 // restore a12 - l32i a13, a3, CS_SA_areg + 13*4 // restore a13 - l32i a14, a3, CS_SA_areg + 14*4 // restore a14 - l32i a15, a3, CS_SA_areg + 15*4 // restore a15 -#endif - -#if XCHAL_HAVE_EXCEPTIONS - // Now that we've restored windowed state (a0,a1), we can restore interrupts. - l32i a4, a3, CS_SA_ps // restore ps - wsr.ps a4 - rsync -#endif - - abi_return - - - - // Generic processor state save routine, callable from assembly-level - // (Does not assume valid stack, saves all ARs, no window-spill etc.) - // - // On entry: - // a0 = return PC - // a2 = flags argument - // a3 = ptr to save area - // a4 = ptr to code to jump to after save (just return if 0) - // All other registers are saved. - // Returns: - // 0 when first returning from this call (if a4 == 0) - // non-zero (passed from restore call) when returning from restore - // (if a4 != 0, return behavior if any depends on code at a4) - // - .align 4 - .global _xtos_core_save_nw -_xtos_core_save_nw: - s32i a0, a3, CS_SA_areg + 0*4 // save a0 (clobbered below) - s32i a1, a3, CS_SA_areg + 1*4 // save a1 - s32i a2, a3, CS_SA_areg + 2*4 // save a2 (flags arg, for debugging only) - s32i a4, a3, CS_SA_areg + 4*4 // save a4 (code to jump to after saving) - s32i a5, a3, CS_SA_areg + 5*4 // save a5 - s32i a6, a3, CS_SA_areg + 6*4 // save a6 - s32i a7, a3, CS_SA_areg + 7*4 // save a7 - j .Ls2 - - .align 16 -.Ls2: -#if XCHAL_HAVE_INTERRUPTS - rsil a4, 15 // disable interrupts before rotating etc -#elif XCHAL_HAVE_EXCEPTIONS - rsr.ps a4 -#endif - -#if XCHAL_HAVE_EXCEPTIONS - s32i a4, a3, CS_SA_ps // save PS -#endif - -#if XCHAL_HAVE_CCOUNT - rsr.ccount a5 // save CCOUNT restore value -#endif -#if XCHAL_HAVE_INTERRUPTS - rsr.interrupt a6 // save pending interrupts - s32i a6, a3, CS_SA_interrupt -#endif -#if XCHAL_HAVE_CCOUNT - s32i a5, a3, CS_SA_ccount -#endif - -#if XCHAL_HAVE_WINDOWED - movi a5, XCHAL_NUM_AREGS / 8 - 1 // number of 8-reg chunks to save (a0-a7 already done) -#endif -1: s32i a8, a3, CS_SA_areg + 8*4 // save a8 - s32i a9, a3, CS_SA_areg + 9*4 // save a9 - s32i a10,a3, CS_SA_areg + 10*4 // save a10 - s32i a11,a3, CS_SA_areg + 11*4 // save a11 - s32i a12,a3, CS_SA_areg + 12*4 // save a12 - s32i a13,a3, CS_SA_areg + 13*4 // save a13 - s32i a14,a3, CS_SA_areg + 14*4 // save a14 - s32i a15,a3, CS_SA_areg + 15*4 // save a15 -#if XCHAL_HAVE_WINDOWED - addi a11, a3, 8*4 // next frame (a11 will become a3, a13 become a5) - addi a13, a5, -1 - rotw 2 - bnez a5, 1b // loop until all frames done - rotw 2 // back to starting windowbase -#endif - - movi a1, 0 // not to save any regs from stack - call0 _xtos_core_save_common - - // a2 now contains return value. - // a3 still points to save area. - // Interrupts still disabled. 
- -#if XCHAL_HAVE_WINDOWED - rotw -2 - movi a5, XCHAL_NUM_AREGS / 8 - 1 // 8-reg chunks to restore (a0-a7 already done) - addi a3, a11, XCHAL_NUM_AREGS * 4 -1: rotw -2 - addi a3, a11, -8*4 - addi a5, a13, -1 -#endif - l32i a8, a3, CS_SA_areg + 8*4 // restore a8 - l32i a9, a3, CS_SA_areg + 9*4 // restore a9 - l32i a10,a3, CS_SA_areg + 10*4 // restore a10 - l32i a11,a3, CS_SA_areg + 11*4 // restore a11 - l32i a12,a3, CS_SA_areg + 12*4 // restore a12 - l32i a13,a3, CS_SA_areg + 13*4 // restore a13 - l32i a14,a3, CS_SA_areg + 14*4 // restore a14 - l32i a15,a3, CS_SA_areg + 15*4 // restore a15 -#if XCHAL_HAVE_WINDOWED - bnez a5, 1b // loop until all frames done - // We're now back to starting windowbase, and original a3. -#endif - - l32i a0, a3, CS_SA_areg + 0*4 // restore a0 - l32i a1, a3, CS_SA_areg + 1*4 // restore a1 - // Don't clobber return value, so don't restore a2. - l32i a4, a3, CS_SA_areg + 4*4 // restore a4 - l32i a5, a3, CS_SA_areg + 5*4 // restore a5 - l32i a6, a3, CS_SA_areg + 6*4 // restore a6 -#if XCHAL_HAVE_EXCEPTIONS - // Now that we've restored windowed state (a0,a1,done rotating), we can restore interrupts. - l32i a7, a3, CS_SA_ps // restore ps - wsr.ps a7 - rsync -#endif - l32i a7, a3, CS_SA_areg + 7*4 // restore a7 - ret - - - - - // Common state save / shut-off code. - // - // a0 = return PC within caller shut-off routine - // a1 = stack if != 0 - // a2 = flags argument - // a3 = pointer to _xtos_pso_savearea - // a4 = PS to save/restore - // PS.INTLEVEL = 15 (interrupts disabled, except NMI) - // a5-a15 (and other ARs) are available. - // NOTE: CCOUNT and INTERRUPT have already been saved in save area. - // - .align 4 - //.global _xtos_core_save_common -_xtos_core_save_common: -//#if XCHAL_HAVE_EXCEPTIONS -// s32i a4, a3, CS_SA_ps // save PS -//#endif - -#if XCHAL_HAVE_CACHE_BLOCKOPS - pfend.o // terminate non-essential block-prefetch ops -#endif - -#if XCHAL_HAVE_WINDOWED - // The following discussion is valid if we have a stack: - // At this point, all non-live register windows have been spilled to the - // stack. However, we cannot leave any spilled registers in our stack frame - // or our caller's stack frame, since these frames could change after we - // return and before restore() is called. So all spilled registers in the - // current and previous stack frames must be saved to the save area. This - // means a max of 16 registers: 4 base save registers for our caller, upto - // 8 extra save registers for our caller, and 4 base save registers for the - // next function up from our caller. The stack looks like this: - // - // ------------------------------- <---- stack ptr of function (i - 2) - // Base save area i - 3 - // ------------------------------- - // Extra save area i - 1 - // (0-8 registers depending on call type) - // ------------------------------- - // Locals i - 1 - // ------------------------------- <---- stack ptr of function (i - 1) - // Base save area i - 2 (our caller) - // - // ------------------------------- <---- Our stack ptr (a1) - // Base save area i - 1 - // ------------------------------- - // - // We don't have any extra save area or locals in our frame. See the - // Xtensa Programmer's Guide for more details of the stack layout. - // - // NOTE that we are not counting the call0 to _xtos_core_save_common() since - // that does not result in any register window rotation nor stack ptr change. 
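A rough C rendering of the frame bookkeeping described above may be easier to follow than the assembly; the struct, the helper and their names below are an illustrative sketch under the usual windowed-ABI convention (bits 31:30 of a windowed return address hold the caller's window increment: 1 = call4, 2 = call8, 3 = call12) and are not part of these sources:

    #include <stdint.h>

    /* The 16-byte base save area sitting just below a frame's stack pointer;
     * window overflow spills that frame's caller's a0..a3 here.
     */
    struct base_save_area {
            uint32_t a0;    /* caller's return PC */
            uint32_t a1;    /* caller's stack pointer */
            uint32_t a2;
            uint32_t a3;
    };

    /* Size of the extra save area implied by a windowed return address,
     * mirroring the 'extui a4, a4, 30, 2' sequence in the code below:
     * call0/call4 spill no extra registers, call8 spills a4..a7 (16 bytes),
     * call12 spills a4..a11 (32 bytes).
     */
    static inline uint32_t extra_save_bytes(uint32_t windowed_ret_pc)
    {
            uint32_t inc = windowed_ret_pc >> 30;

            return (inc < 2) ? 0 : (inc - 1) * 16;
    }
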
- - s32i a1, a3, CS_SA_caller_regs_saved // save flag - beqz a1, .Lendcr // skip if no stack - - // Save our caller's a0-a3 from the base save area (a1-16) - - addi a4, a1, -16 - l32i a5, a4, 0 - l32i a6, a4, 4 - s32i a5, a3, CS_SA_caller_regs // caller a0 - s32i a6, a3, CS_SA_caller_regs + 4 // caller a1 - l32i a5, a4, 8 - l32i a6, a4, 12 - s32i a5, a3, CS_SA_caller_regs + 8 // caller a2 - s32i a6, a3, CS_SA_caller_regs + 12 // caller a3 - - // Save our callers caller's a0-a3 from its base save area (a1+0) - - l32i a5, a1, 0 - l32i a6, a1, 4 - s32i a5, a3, CS_SA_caller_regs + 16 // caller caller a0 - s32i a6, a3, CS_SA_caller_regs + 20 // caller caller a1 - l32i a5, a1, 8 - l32i a6, a1, 12 - s32i a5, a3, CS_SA_caller_regs + 24 // caller caller a2 - s32i a6, a3, CS_SA_caller_regs + 28 // caller caller a3 - - // Now save 0-8 registers for our caller from its ext save area - // NOTE we can't use a0 directly because we are one level down - - l32i a4, a3, CS_SA_areg // pull in the return address - extui a4, a4, 30, 2 // Top 2 bits of ret addr - blti a4, 2, .Lendcr // No regs to save - l32i a5, a1, 4 // a5 <- caller caller a1 - slli a4, a4, 4 - sub a4, a5, a4 // a4 <- bottom of extra save area - addi a5, a5, -16 // a5 <- top of extra save area - addi a6, a3, CS_SA_caller_regs + 32 // location to start saving to -.Lcrloop: - l32i a7, a4, 0 // Save in groups of 4 registers - l32i a8, a4, 4 - s32i a7, a6, 0 - s32i a8, a6, 4 - l32i a7, a4, 8 - l32i a8, a4, 12 - s32i a7, a6, 8 - s32i a8, a6, 12 - addi a4, a4, 16 - addi a6, a6, 16 - blt a4, a5, .Lcrloop -.Lendcr: -#endif - - // We want to save the CCOUNT value as soon as feasible after disabling - // interrupts, so that the counter does not run past any CCOMPARE value - // and miss a timer interrupt. The callers of this function have saved - // the values of CCOUNT and INTERRUPT immediately after disabling interrupts. - -#if XCHAL_HAVE_CCOUNT - .set _idx, 0 - .rept XCHAL_NUM_TIMERS - INDEX_SR rsr.ccompare a5 - s32i a5, a3, CS_SA_ccompare + 4*_idx - .set _idx, _idx+1 - .endr -#endif - - s32i a0, a3, CS_SA_restore_label // where to return to, to return from function -#if XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_EXCEPTIONS - rsr.epc1 a5 - s32i a5, a3, CS_SA_epc1 - rsr.excsave1 a5 - s32i a5, a3, CS_SA_excsave1 -# ifdef XCHAL_DOUBLEEXC_VECTOR_VADDR - rsr.depc a5 - s32i a5, a3, CS_SA_depc -# endif -#endif -#if XCHAL_HAVE_WINDOWED - rsr.windowbase a5 - s32i a5, a3, CS_SA_windowbase // save windowbase - rsr.windowstart a5 - s32i a5, a3, CS_SA_windowstart // save windowstart -#endif - rsr.sar a5 - s32i a5, a3, CS_SA_sar // save sar - -#if XCHAL_HAVE_PSO_CDM - // Save PWRCTL, and update according to flags argument. - movi a4, XDM_MISC_PWRCTL - movi a6, PWRCTL_MEM_WAKEUP - rer a7, a4 // get pwrctl - s32i a7, a3, CS_SA_pwrctl // save pwrctl - // Avoid setting power-control bits if not already set, i.e. clear them only. - bbci.l a2, XTOS_COREF_PSO_SHIFT, 1f // if not shutting off, don't touch power bits - - // Set PWRCTL MEM_WAKEUP bit according to flags (whether to let mem power off). - or a5, a7, a6 // set... - xor a5, a5, a6 // ... and clear MEM_WAKEUP bit to write - and a6, a2, a6 // isolate MEM_WAKEUP bit from flags - or a5, a5, a6 // set MEM_WAKEUP bit to write from flags - // Clear PWRCTL DEBUG_WAKEUP bit if cleared in flags (if letting debug power off). 
- movi a6, ~PWRCTL_DEBUG_WAKEUP - or a6, a2, a6 // isolate DEBUG_WAKEUP bit from flags - and a6, a5, a6 // clear it if was clear in flags - // Update PWRCTL - wer a6, a4 // write new pwrctl - //extw // let the new pwrctl value settle -1: -#endif - - .set _idx, 2 - .rept XCHAL_NUM_INTLEVELS+XCHAL_HAVE_NMI-1 - INDEX_SR rsr.epc a5 - s32i a5, a3, CS_SA_epc + 4*(_idx-2) - INDEX_SR rsr.eps a5 - s32i a5, a3, CS_SA_eps + 4*(_idx-2) - INDEX_SR rsr.excsave a5 - s32i a5, a3, CS_SA_excsave + 4*(_idx-2) - .set _idx, _idx+1 - .endr - -#if XCHAL_HAVE_LOOPS - rsr.lbeg a5 - s32i a5, a3, CS_SA_lbeg - rsr.lend a5 - s32i a5, a3, CS_SA_lend - rsr.lcount a5 - s32i a5, a3, CS_SA_lcount -#endif -#if XCHAL_HAVE_ABSOLUTE_LITERALS - rsr.litbase a5 - s32i a5, a3, CS_SA_litbase -#endif -#if XCHAL_HAVE_VECBASE - rsr.vecbase a5 - s32i a5, a3, CS_SA_vecbase -#endif -#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? */ - rsr.atomctl a5 - s32i a5, a3, CS_SA_atomctl -#endif -#if XCHAL_HAVE_PREFETCH - movi a5, 0 // disable prefetch during shutoff - xsr.prefctl a5 - s32i a5, a3, CS_SA_prefctl -#endif -#if XCHAL_USE_MEMCTL - rsr.memctl a5 - s32i a5, a3, CS_SA_memctl -#endif -#if XCHAL_HAVE_INTERRUPTS - rsr.intenable a5 - s32i a5, a3, CS_SA_intenable -#endif -#if XCHAL_HAVE_DEBUG - // NOTE: restore of debug state is conditional, - // as the power-down and wakeup code might be actively debugged. - rsr.icount a5 - s32i a5, a3, CS_SA_icount - rsr.icountlevel a5 - s32i a5, a3, CS_SA_icountlevel - rsr.debugcause a5 - s32i a5, a3, CS_SA_debugcause // (won't get restored?) - //rsr.ddr a5 - //s32i a5, a3, CS_SA_ddr -# if XCHAL_NUM_IBREAK - rsr.ibreakenable a5 - s32i a5, a3, CS_SA_ibreakenable -# endif - .set _idx, 0 - .rept XCHAL_NUM_IBREAK - INDEX_SR rsr.ibreaka a5 - s32i a5, a3, CS_SA_ibreaka + 4*_idx - .set _idx, _idx+1 - .endr - .set _idx, 0 - .rept XCHAL_NUM_DBREAK - INDEX_SR rsr.dbreakc a5 - s32i a5, a3, CS_SA_dbreakc + 4*_idx - INDEX_SR rsr.dbreaka a5 - s32i a5, a3, CS_SA_dbreaka + 4*_idx - .set _idx, _idx+1 - .endr -#endif - - .set _idx, 0 - .rept XCHAL_NUM_MISC_REGS - INDEX_SR rsr.misc a5 - s32i a5, a3, CS_SA_misc + 4*_idx - .set _idx, _idx+1 - .endr - -#if XCHAL_HAVE_MEM_ECC_PARITY - rsr.mepc a5 - s32i a5, a3, CS_SA_mepc - rsr.meps a5 - s32i a5, a3, CS_SA_meps - rsr.mesave a5 - s32i a5, a3, CS_SA_mesave - rsr.mesr a5 - s32i a5, a3, CS_SA_mesr - rsr.mecr a5 - s32i a5, a3, CS_SA_mecr - rsr.mevaddr a5 - s32i a5, a3, CS_SA_mevaddr -#endif - - /* TIE state */ - addi a4, a3, CS_SA_ncp - xchal_ncp_store a4, a5,a6,a7,a8 // save non-coprocessor state -#if XCHAL_HAVE_CP - rsr.cpenable a5 - s32i a5, a3, CS_SA_cpenable - movi a6, -1 - wsr.cpenable a6 // enable all coprocessors - rsync - xchal_cp0_store a4, a5,a6,a7,a8 continue=1 - xchal_cp1_store a4, a5,a6,a7,a8 continue=1 - xchal_cp2_store a4, a5,a6,a7,a8 continue=1 - xchal_cp3_store a4, a5,a6,a7,a8 continue=1 - xchal_cp4_store a4, a5,a6,a7,a8 continue=1 - xchal_cp5_store a4, a5,a6,a7,a8 continue=1 - xchal_cp6_store a4, a5,a6,a7,a8 continue=1 - xchal_cp7_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp8_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp9_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp10_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp11_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp12_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp13_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp14_store a4, a5,a6,a7,a8 continue=1 - //xchal_cp15_store a4, a5,a6,a7,a8 continue=1 -#endif - - /* TLB state (for known MMU types only, not internal custom) */ -#if 
XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR - addi a4, a3, CS_SA_tlbs // where to start storing TLB entry info - movi a5, 0x20000000 - movi a6, 0 -1: rdtlb1 a7, a6 // read DTLB entry PPN + CA - s32i a7, a4, 0 - ritlb1 a7, a6 // read ITLB entry PPN + CA - s32i a7, a4, 4 - addi a4, a4, 8 - add a6, a6, a5 - bnez a6, 1b - -#elif XCHAL_HAVE_PTP_MMU - // Declare a table of TLB entries to save/restore. - // Each entry is a 32-bit index to use directly with [rw][di]tlb[01]. - // Indices assume ITLBCFG == DTLBCFG == 0. - // Bit 4 means not-for-dtlb, and bit 5 means not-for-itlb - // (these bits aren't used by these instructions, so okay to use for this). - .section .rodata, "a" - .global _xtos_pso_tlbmap - .global _xtos_pso_tlbmap_end - .type _xtos_pso_tlbmap, @object -_xtos_pso_tlbmap: - .long 0x0C0C0C0C, ARF_ENTRIES // *TLB way 0, 4/8 entries of 4KB - .long 0x0C0C0C0C, ARF_ENTRIES // *TLB way 1, 4/8 entries of 4KB - .long 0x0C0C0C0C, ARF_ENTRIES // *TLB way 2, 4/8 entries of 4KB - .long 0x0C0C0C0C, ARF_ENTRIES // *TLB way 3, 4/8 entries of 4KB - .long 0x1A181614, 4 // *TLB way 4, 4 entries of 1MB/4MB/16MB/64MB -# if XCHAL_HAVE_SPANNING_WAY /* MMU v3 */ - .long 0x1C1B1C1B, 4 // *TLB way 5, 4 entries of 128MB/256MB - .long 0x1B1D1B1D, 8 // *TLB way 6, 8 entries of 512MB/128MB -# endif - .long 0x0C0C0C0C, 0x8001 // DTLB way 7, 1 entry of 4KB - .long 0x0C0C0C0C, 0x8001 // DTLB way 8, 1 entry of 4KB - .long 0x0C0C0C0C, 0x8001 // DTLB way 9, 1 entry of 4KB -_xtos_pso_tlbmap_end: - .size _xtos_pso_tlbmap, . - _xtos_pso_tlbmap - - .text - addi a4, a3, CS_SA_tlbs // where to start storing TLB entry info - movi a10, _xtos_pso_tlbmap - movi a11, _xtos_pso_tlbmap_end - rsr.dtlbcfg a14 // page size index (0..3) for each DTLB way - rsr.itlbcfg a15 // page size index (0..3) for each ITLB way - s32i a14, a3, CS_SA_dtlbcfg - s32i a15, a3, CS_SA_itlbcfg - rsr.ptevaddr a5 - s32i a5, a3, CS_SA_ptevaddr - rsr.rasid a5 - s32i a5, a3, CS_SA_rasid - // Loop from last way to first (less register pressure that way). -.Loop_tlbmap: - addi a11, a11, -8 // next way - l32i a8, a11, 0 // map of four (page size log2) per index for this way - // DTLB page size: - extui a12, a14, 0, 4 // page size index for this DTLB way - srli a14, a14, 4 // (for next way) - ssa8l a12 // prepare to shift right by 8*a12 - srl a12, a8 // page size log2 for this DTLB way - ssl a12 // prepare to shift left by a12 - movi a12, 1 // (to compute 1 << (page size log2)) - sll a12, a12 // page size for this DTLB way - - // Save all entries of this DTLB way: - l32i a9, a11, 4 // number of entries for this way - sub a5, a11, a10 // way number * 8 - srli a5, a5, 3 // way number - extui a9, a9, 0, 8 -1: rdtlb0 a6, a5 // read DTLB entry VPN + ASID ... - rdtlb1 a7, a5 // read DTLB entry PPN + CA ... - add a5, a5, a12 // next entry of this DTLB way - s32i a6, a4, 0 // save entry ... 
- s32i a7, a4, 4 - addi a4, a4, 8 - addi a9, a9, -1 - bnez a9, 1b - - // ITLB page size: - extui a12, a15, 0, 4 // page size index for this ITLB way - srli a15, a15, 4 // (for next way) - ssa8l a12 // prepare to shift right by 8*a12 - srl a12, a8 // page size log2 for this ITLB way - ssl a12 // prepare to shift left by a12 - movi a12, 1 // (to compute 1 << (page size log2)) - sll a12, a12 // page size for this ITLB way - - // Save all entries of this ITLB way: - l32i a9, a11, 4 // number of entries for this way - sub a5, a11, a10 // way number * 8 - srli a5, a5, 3 // way number - bbsi.l a9, 15, 2f // skip ITLB if is a DTLB-only way - extui a9, a9, 0, 8 -1: ritlb0 a6, a5 // read ITLB entry VPN + ASID ... - ritlb1 a7, a5 // read ITLB entry PPN + CA ... - add a5, a5, a12 // next entry of this ITLB way - s32i a6, a4, 0 // save entry ... - s32i a7, a4, 4 - addi a4, a4, 8 - addi a9, a9, -1 - bnez a9, 1b -2: - bne a11, a10, .Loop_tlbmap // loop for next TLB way - // Done saving TLBs. -#endif - -#if XCHAL_HAVE_CACHE_BLOCKOPS - pfwait.a // wait for any remaining block-prefetch ops -#endif - -#if XCHAL_HAVE_MPU - addi a4, a3, CS_SA_mpuentry // location for MPU save - mpu_read_map a4, a5, a6 - rsr.cacheadrdis a4 - addi a5, a3, CS_SA_cacheadrdis - s32i a4, a5, 0 - -#if XCHAL_DCACHE_IS_WRITEBACK - // Must write this piece back to memory, because if it stays - // in the cache and we try to restore with caches bypassed, - // the wrong values will be fetched from memory. - // TODO: See if possible to replace with call to xthal_dcache_region_writeback - // TODO: If going to write back full dcache below, skip this step - addi a4, a3, CS_SA_mpuentry - movi a5, CS_SA_ncp - CS_SA_mpuentry - dcache_writeback_region a4, a5, a7, a8 -#endif -#endif - - // With data cache coherency enabled, need a full data cache - // writeback and invalidate, then disable coherency, before shut-off. - // Otherwise, if we'll let dcache power off, writeback its contents. - // - // We make sure the signature only gets written after everything - // else is written back (if we writeback), and only gets written - // back if the rest gets written back. - movi a6, CORE_STATE_SIGNATURE -#if XCHAL_DCACHE_IS_WRITEBACK -# if XCHAL_HAVE_PSO_CDM && XCHAL_DCACHE_IS_COHERENT && XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2012_0 - rsr.memctl a4 - bbci.l a2, XTOS_COREF_PSO_SHIFT, 1f // if not shutting off, leave snoops as is - bbci.l a4, MEMCTL_SNOOP_EN_SHIFT, 1f // snoops (coherence) enabled? - dcache_writeback_inv_all a4, a5, a7, 0 // yes: writeback-invalidate - memw // wait for writeback to complete - s32i a6, a3, CS_SA_signature - dhwbi a3, CS_SA_signature - // Now that dcache is empty, make sure snoops are off during shut-off. - addi a4, a4, -MEMCTL_SNOOP_EN - wsr.memctl a4 - j 9f -1: -# endif - bbsi.l a2, PWRCTL_MEM_WAKEUP_SHIFT, 7f // letting caches power off? - dcache_writeback_all a4, a5, a7, 0 // yes: writeback - memw // wait for writeback to complete - j 8f - - // The signature and the cache/TLB state must be written out to - // main memory even though the caches stay on, because on restart - // we will come up with caches bypassed and need to read the state - // back before the cache/TLB is set up. 
-7: - mov a4, a3 - movi a5, CS_SA_ncp - dcache_writeback_region a4, a5, a7, a8 - memw -8: - s32i a6, a3, CS_SA_signature - dhwb a3, CS_SA_signature // needed even if caches stay on -#else - s32i a6, a3, CS_SA_signature -#endif - -9: l32i a4, a3, CS_SA_areg + 4*4 // restore a4 (code to jump to after saving) - memw // wait for signature to be in memory - - beqz a4, 1f // code to jump to? - jx a4 // yes, jump to it -1: l32i a0, a3, CS_SA_restore_label // no, return: restore return PC - movi a2, 0 // return 0 - ret - - - .size _xtos_core_save, . - _xtos_core_save - diff --git a/src/arch/xtensa/xtos/core-shutoff.S b/src/arch/xtensa/xtos/core-shutoff.S deleted file mode 100644 index c848e38ca1ab..000000000000 --- a/src/arch/xtensa/xtos/core-shutoff.S +++ /dev/null @@ -1,425 +0,0 @@ -// reset-pso.S -- PSO restore routine, invoked from Reset Vector -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/core-shutoff.S#1 $ - -// Copyright (c) 2012-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include -#include -#include -#include -#include "xtos-internal.h" - -#if XCHAL_HAVE_PSO - .macro simulate_reset - // Single or multiple power domains, no retention. - // Just simulate reset. Set PS: - movi a5, 0x1F - wsr.ps a5 - rsync - // Scramble things: - rotw 3 - // Jump to reset vector: - movi a5, _ResetVector - jx a5 - .endm -#endif - - -#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION - // PSO: core state save area. - // This could be pretty large (includes TIE state, TLB state, many regs). - // - .section .bss, "aw" - .align XCHAL_TOTAL_SA_ALIGN - .global _xtos_pso_savearea - .type _xtos_pso_savearea, @object - .size _xtos_pso_savearea, XtosCoreStateSize -_xtos_pso_savearea: - .space XtosCoreStateSize -#endif - - - .text - - - // This version of the _xtos_core_shutoff() function can be called from assembly-level, - // where the stack might not be defined/usable, so can't do window-spill - // etc. This saves address registers per call0 ABI (all except a0/a2..a11). - // - // On entry: a0 = return PC, a2 = flags argument, a3..a11 = undefined/available. - // All other registers are saved/restored. - // - .align 4 - .global _xtos_core_shutoff_nw - .type _xtos_core_shutoff_nw,@function -_xtos_core_shutoff_nw: -#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION - movi a5, _xtos_core_save_nw -#endif - mov a11, a0 // ABI-independent return PC - j .Lcommon_shutoff - .size _xtos_core_shutoff_nw, . 
- _xtos_core_shutoff_nw - - - - // int _xtos_core_shutoff(unsigned flags) - // - // Save all processor state and shut-off the core. - // Returns when the core wakes up, and all state was restored - // (except in single power domain case, see below). - // - // For more details see: System SW Ref Manual, XTOS Chapter. - // - // Possible return values: - // - // 0 core did shut-off (return via reset vector, - // or after waiti for full-retention case) - // - // 1,2 core did not shut-off (other requestors were already - // requesting this core to stay on at time of call) - // (1 == early, 2 == late) - // - // 3 core did not shut-off (multi-power-domains no retention, - // and waiti resumed; FIXME: can this happen?) - // - // -1 core does not have PSO feature - // - // NOTE: in the single power domain case, this function never returns. - // The external system must power the core off than back on, - // and execution resumes at the reset vector. - // - // The flags parameter indicates whether to request memory and/or debug domains - // to stay powered on while the core is shut-off. (This parameter is ignored - // for the single power domain case.) If 0, they are both allowed to - // shut-off (although other external requesters may keep them powered on). - // Otherwise, one or both of these bits may be specified (or'ed together): - // XTOS_KEEPON_MEM force memory domain on during core power shut-off - // XTOS_KEEPON_DEBUG force debug domain on during core power shut-off - // If XTOS_KEEPON_MEM is specified, dcache writeback is NOT done. - // - // Effectively, the flags parameter sets the value of these two PWRCTL register - // bits (over ERI) during core power shut-off. The value of these two bits - // (as they were prior to calling this function) are saved, and restored on wakeup. - // Thus, if the core was requesting that the debug domain be powered on, and - // _xtos_core_shutoff() lets it power-off, then upon wakeup, the software restore - // sequence restores debug domain power, and waits for debug power to be ON. - // - // - .align 4 - .global _xtos_core_shutoff - .type _xtos_core_shutoff,@function -_xtos_core_shutoff: - abi_entry - -#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION - movi a5, _xtos_core_save_entry -#endif - movi a11, 1f // ABI-independent return PC - j .Lcommon_shutoff - -1: abi_return - - - - - .align 4 -.Lcommon_shutoff: - - - -#if XCHAL_HAVE_PSO_CDM && XCHAL_HAVE_PSO_FULL_RETENTION - // Multiple power domains, full retention in HW. - // Do the minimum required (things that need to be changed during shutoff): - - // Check whether other agents are keeping this core powered on, - // and avoid going through save sequence if we're not going to - // power down anyway. - movi a3, XDM_MISC_PWRSTAT - rer a6, a3 - movi a5, 1 // indicates other agents want this core powered on - bbsi.l a6, PWRSTAT_CORE_STILL_NEEDED_SHIFT, 1f - - rsil a8, 15 // disable interrupts - -# if XCHAL_HAVE_PREFETCH - // Save prefetch control and disable prefetch. - movi a10, 0 - xsr.prefctl a10 -# endif - -# if XCHAL_DCACHE_IS_WRITEBACK - bbsi.l a2, PWRCTL_MEM_WAKEUP_SHIFT, 7f // letting caches power off? - dcache_writeback_all a4, a7, a9,0 // yes: writeback - memw // wait for writeback to complete -7: -# endif - - // Save PWRCTL, and set ShutProcOffOnPWait (for WAITI to shut-off the core). - // (With dcache coherence, can this be used as signal to system - // to turn off snoops to this core?) 
- // - movi a4, XDM_MISC_PWRCTL - rer a9, a4 // get pwrctl - movi a6, PWRCTL_CORE_SHUTOFF // aka ShutProcOffOnPWait - or a7, a9, a6 // indicate WAITI will shut-off - xor a9, a7, a6 // make sure it's clear in saved pwrctl - wer a7, a4 // write new pwrctl - - // Make sure everything stabilizes: - isync - extw - - // With ShutProcOffOnPWait set, external agents can't change their mind. - // So check again whether other agents are keeping this core powered on, - // and avoid going through save sequence if we're not going to - // power down anyway. - rer a6, a3 // read PWRSTAT - movi a5, 2 // if abort: external agent wants core powered on - bbsi.l a6, PWRSTAT_CORE_STILL_NEEDED_SHIFT, .Lshutoff_late_abort - - // Set PWRCTL MEM_WAKEUP bit according to flags (whether to let mem power off). - movi a6, PWRCTL_MEM_WAKEUP - or a5, a7, a6 // set... - xor a5, a5, a6 // ... and clear MEM_WAKEUP bit to write - and a6, a2, a6 // isolate MEM_WAKEUP bit from flags - or a5, a5, a6 // set MEM_WAKEUP bit to write from flags - // Clear PWRCTL DEBUG_WAKEUP bit if cleared in flags (if letting debug power off). - movi a6, ~PWRCTL_DEBUG_WAKEUP - or a6, a2, a6 // isolate DEBUG_WAKEUP bit from flags - and a6, a5, a6 // clear it if was clear in flags - // Update PWRCTL - wer a6, a4 // write new pwrctl - extw // let the new pwrctl value settle - - // Okay, go for it -- power down (shutoff). - -# if !XTOS_PSO_TEST - waiti 0 // now shut-off! (interrupts enabled for power-on) -# endif - // Resumes here after power comes back on, after some interrupt taken. - wsr.ps a8 // restore interrupts - movi a5, 0 // successful - rsync // ensure wsr.ps completes - - // FIXME: do we need to initialize caches? - -.Lshutoff_late_abort: - wer a7, a4 // restore pwrctl (except ShutProcOffOnPWait still set) - - // Wait for debug powerup to complete (if started): - bbci.l a7, PWRCTL_DEBUG_WAKEUP_SHIFT, 3f - movi a6, XDM_MISC_PWRSTAT -2: rer a6, a6 // read PWRSTAT - bbci.l a6, PWRSTAT_DEBUG_DOMAIN_ON_SHIFT, 2b // loop until powered up -3: - -# if XCHAL_HAVE_PREFETCH - wsr.prefctl a10 // restore prefetch control -# endif - - // If CachesLostPower bit set, is there need to initialize caches? - - wer a9, a4 // clear ShutProcOffOnPWait - - wsr.ps a8 // restore interrupts - rsync // ensure wsr.ps completes - -1: mov a2, a5 - jx a11 // return (to ABI-dependent code if C callable) - - - -#elif XCHAL_HAVE_PSO_CDM /*&& !XCHAL_HAVE_PSO_FULL_RETENTION*/ - // Multiple power domains, no hardware retention. - // Do full core state save/restore in software. - - // Check whether other agents are keeping this core powered on, - // and avoid going through save sequence if we're not going to - // power down anyway. - movi a3, XDM_MISC_PWRSTAT - rer a3, a3 - bbsi.l a3, PWRSTAT_CORE_STILL_NEEDED_SHIFT, 1f - - movi a3, XTOS_COREF_PSO - or a2, a2, a3 // indicate power shutoff in flags - - movi a3, _xtos_pso_savearea - movi a4, _xtos_core_shutoff_aftersave - jx a5 // continue in _xtos_core_save (past prologue) - // which saves processor state, powers down - // debug/mem per a2, shuts off prefetch and - // snooping, and jumps to a4 - -1: movi a2, 1 // other agents want this core powered on - jx a11 // return (to ABI-dependent code if C callable) - - .align 4 - //.global _xtos_core_shutoff_aftersave -_xtos_core_shutoff_aftersave: - - // Multiple power domains, no retention. - - // Set ShutProcOffOnPWait, for WAITI to shut-off the core. - // (With dcache coherence, can this be used as signal to system - // to turn off snoops to this core?) 
- // - movi a4, XDM_MISC_PWRCTL - rer a7, a4 // get pwrctl - movi a6, PWRCTL_CORE_SHUTOFF // aka ShutProcOffOnPWait - or a7, a7, a6 // indicate WAITI will shut-off - wer a7, a4 // write new pwrctl - - // Make sure everything stabilizes: - isync - extw - - // Check whether other agents are keeping this core powered on, - // and avoid going through save sequence if we're not going to - // power down anyway. - movi a4, XDM_MISC_PWRSTAT - movi a2, 2 // if abort: external agent wants core powered on - rer a6, a4 - bbsi.l a6, PWRSTAT_CORE_STILL_NEEDED_SHIFT, .Lshutoff_late_abort - - // Call system-specific function to wait for system specific - // transactions to quiesce before shutting down the processor. - // This function may also abort the shutdown, however whoever - // attempts it must do it carefully: the function must know - // that it's possible to abort, it must do whatever's needed - // in the system to resume normal execution (e.g. restart - // snoops, DMA, etc), and for power reasons the software must - // avoid calling this shutdown routine in the first place if - // it can know then that it would end up aborting here. - // - // This is always a call0 function. - // TBD: can it be a C function instead? - // TBD: describe exact calling conventions, if asm call0 - - .weak xtos_system_ready_for_core_shutoff - movi a2, xtos_system_ready_for_core_shutoff - //isync - beqz a2, 1f - callx0 a2 - bnez a2, .Lshutoff_late_abort // if function returns error, abort shutdown -1: - - // Okay, go for it -- power down (shutoff). - - -# if XTOS_PSO_TEST - // Test only -- weakly simulate shutoff in sw, don't actually do it. - simulate_reset -# elif XCHAL_HAVE_INTERRUPTS - waiti 15 // now shut-off! -# elif XCHAL_HAVE_HALT - halt -# else -# error "PSO assumes interrupts (for WAITI) or HALT architecture (for HALT)" -# endif - - // Execution should not proceed here. - // If we get here, some error has occurred [FIXME] - - movi a2, 3 // WAITI resumed - -.Lshutoff_late_abort: - // We end up here if returning from shutoff request. - // Here, a2 == return code. - // Restore what's been clobbered (and doesn't get restored by caller): - // PWRCTL, MEMCTL, return PC. - - l32i a0, a3, CS_SA_restore_label // restore return PC - - // Restore PWRCTL. - movi a4, XDM_MISC_PWRCTL - l32i a5, a3, CS_SA_pwrctl // get saved pwrctl - wer a5, a4 // restore pwrctl - // Wait for debug powerup to complete (if started): - bbci.l a5, PWRCTL_DEBUG_WAKEUP_SHIFT, 1f - movi a7, XDM_MISC_PWRSTAT -2: rer a6, a7 // read PWRSTAT - bbci.l a6, PWRSTAT_DEBUG_DOMAIN_ON_SHIFT, 2b // loop until powered up -1: - - // Restore MEMCTL. -# if XCHAL_USE_MEMCTL - l32i a5, a3, CS_SA_memctl - wsr.memctl a5 -# endif - - // Clear the signature, to mark save area as no longer valid. - s32i a2, a3, CS_SA_signature -# if XCHAL_DCACHE_IS_WRITEBACK - dhwb a3, CS_SA_signature -# endif - - ret // return from _xtos_core_save_common - - - -#elif XCHAL_HAVE_PSO - // Single power domain. (No retention.) - - rsil a8, 15 // disable interrupts - -# if XCHAL_HAVE_PREFETCH - // Disable prefetch. - movi a10, 0 - wsr.memctl a10 -# endif - -# if XCHAL_DCACHE_IS_WRITEBACK - bbsi.l a2, PWRCTL_MEM_WAKEUP_SHIFT, 7f // letting caches power off? - dcache_writeback_all a4, a5, a6, 0 // yes: writeback - memw // wait for writeback to complete -7: -# endif - -1: waiti 15 // wait for shut-off - j 1b // loop until we get powered off - - - -#else - // No PSO. 
- movi a2, -1 - jx a11 // return (to ABI-dependent code if C callable) - -#endif - - - - -#if XCHAL_HAVE_PSO_CDM -# if XCHAL_HAVE_PSO_FULL_RETENTION - -# else /* not full retention */ - - -# endif /* !retention */ -#endif /* multi power domains */ - - - .size _xtos_core_shutoff, . - _xtos_core_shutoff - diff --git a/src/arch/xtensa/xtos/crt0-app.S b/src/arch/xtensa/xtos/crt0-app.S deleted file mode 100644 index 7f12337fd535..000000000000 --- a/src/arch/xtensa/xtos/crt0-app.S +++ /dev/null @@ -1,174 +0,0 @@ -// crt0-app.S -// Applications downloaded in RAM using a debug monitor (eg. XMON, RedBoot) -// start here at _app_reset. Such applications don't have any vectors: -// all exceptions are handled by the debug monitor. -// Thus, this file essentially plays the role of the reset vector -// to setup a few things before jumping to _start (in crt1*.S). - -// Copyright (c) 2005-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include - - -// Assumptions on entry to _app_reset: -// - debug monitor handles all exceptions, has setup all vectors -// - interrupt related state is initialized -// (interrupts disabled or enabled for debug monitor's purposes) -// - debug option related state is initialized (for debug monitor) -// - any MMU related state is initialized (all handled by debug monitor) -// - caches are initialized (cache attributes not necessarily correct) -// - entire application is loaded (no unpacking needed here) - -// Assumptions on exit from _app_reset, ie. when jumping to _start: -// - low (level-one) and medium priority interrupts are disabled -// - C calling context not initialized: -// - PS not fully initialized (eg. PS.WOE not set per ABI) -// - SP not initialized -// - the following are initialized: -// - LITBASE, WindowBase, WindowStart, LCOUNT, CPENABLE, FP's FCR and FSR, -// cache attributes - -/**************************************************************************/ - - .text - .global _app_reset -_app_reset: - /* _app_reset may be required to be located at the beginning of the text - segment. However, the constant pool for _app_reset must be placed - before the code. Jump over the constant pool to solve this. */ - j .LpastInitialConstants - - .literal_position // tells the assembler/linker to place literals here - -.LpastInitialConstants: - // Keep a0 zero. It is used to initialize a few things. - // It is also the return address, where zero indicates - // that the frame used by _start is the bottommost frame. 
- // - movi a0, 0 // keep this register zero. - -#if XCHAL_HAVE_LOOPS - wsr.lcount a0 // loop count = 0 -#endif /* XCHAL_HAVE_LOOPS */ - - // Interrupts might be enabled, make sure at least medium and low priority - // interrupts are disabled until WindowBase, WindowStart, SP, and the stack - // are all properly setup (which will happen outside this file, after the - // _start label). We leave loops enabled on new exception architecture. -#if XCHAL_HAVE_EXCEPTIONS - movi a2, XCHAL_EXCM_LEVEL - wsr.ps a2 // set PS.INTLEVEL=EXCM_LEVEL, PS.WOE=0, PS.EXCM=0 - rsync -#endif - - // DO THIS FIRST: initialize the window start and base - // before, so that windows don't move under us. -#if XCHAL_HAVE_WINDOWED - // We do this even if we are assembling for the - // call0 abi, but it's not really needed. - movi a2, 1 - wsr.windowstart a2 // window start = 1 - wsr.windowbase a0 // window base = 0 - rsync - - // NOTE: a0 may no longer be zero here, because - // we wrote to WindowBase. So clear it again. - movi a0, 0 -#endif - - // Now, BEFORE we do any L32R (or MOVI with non-immediate - // range which results in an L32R), ensure LITBASE is set - // correctly. This is necessary for RAM applications loaded - // using a target-side debug monitor -- such applications - // don't have a reset vector and start execution at _start. - // (This part is unnecessary if running from a reset vector.) - // The target-side debug monitor might have set LITBASE to - // anything at all, so we cannot rely on its value here. -#if XCHAL_HAVE_ABSOLUTE_LITERALS - wsr.litbase a0 // force PC-relative L32R - rsync -# if XSHAL_USE_ABSOLUTE_LITERALS - .begin no-absolute-literals // use PC-rel L32R to load - movi a2, _lit4_start + 0x40001 // address of absolute literals - .end no-absolute-literals // (see handlers/ResetVector.S - wsr.litbase a2 // for explanation) - rsync -# endif -#endif - - - /* - * Now "enable" the caches. - * - * NOTE: We don't *initialize* the caches here, because the loader - * (eg. target debugger agent / debug monitor, boot code, etc) - * is expected to have initialized them for us. - * - * The _memmap_cacheattr_reset symbol's value (address) is defined - * by the LSP's linker script, as generated by xt-genldscripts. - * - * (NOTE: for configs that don't have CACHEATTR or region protection, - * ie. for full MMUs, there is no equivalent cache attribute layout, - * and the following code has no effect. We assume for now that the - * application restricts itself to the static TLB entries, i.e. to - * virtual addresses 0xD0000000 thru 0xFFFFFFFF.) - */ -#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \ - || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */ - cacheattr_set /* set CACHEATTR from a2 (clobbers a3-a8) */ -#endif - - - - // Coprocessor option initialization -#if XCHAL_HAVE_CP - //movi a2, XCHAL_CP_MASK // enable existing CPs - // To allow creating new coprocessors using TC that are not known - // at GUI build time without having to explicitly enable them, - // all CPENABLE bits must be set, even though they may not always - // correspond to a coprocessor. 
- movi a2, 0xFF // enable *all* bits, to allow dynamic TIE - wsr.cpenable a2 -#endif - - // Floating point coprocessor option initialization -#if XCHAL_HAVE_FP - rsync /* wait for WSR to CPENABLE to complete before accessing FP coproc state */ - wur.fcr a0 /* clear FCR (default rounding mode, round-nearest) */ - wur.fsr a0 /* clear FSR */ -#endif - - - /* NOTE: Future releases may clear BSS here rather than in the CRT1. */ - - - /* - * Now jump to the application. This is typically the - * C run-time initialization ("CRT") which in turn calls main(): - */ - movi a4, _start - jx a4 // jump to _start - - .size _app_reset, . - _app_reset - diff --git a/src/arch/xtensa/xtos/crt1-boards.S b/src/arch/xtensa/xtos/crt1-boards.S deleted file mode 100644 index 3e2ed583e8a3..000000000000 --- a/src/arch/xtensa/xtos/crt1-boards.S +++ /dev/null @@ -1,348 +0,0 @@ -// crt1-boards.S -// -// For most hardware / boards, this code sets up the C calling context -// (setting up stack, PS, and clearing BSS) and jumps to __clibrary_start -// which sets up the C library, calls constructors and registers destructors, -// and calls main(). -// -// Control arrives here at _start from the reset vector or from crt0-app.S. - -// Copyright (c) 1998-2017 Cadence Design Systems, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#if CONFIG_MULTICORE -#include -#include -#endif -#include -#include -#include -#include "xtos-internal.h" - - -// Exports -.global _start - -// Imports -// __clibrary_init from C library (eg. 
newlib or uclibc) -// exit from C library -// main from user application -// board_init board-specific (uart/mingloss/tinygloss.c) -// xthal_dcache_all_writeback from HAL library -// __stack from linker script (see LSP Ref Manual) -// _bss_table_start from linker script (see LSP Ref Manual) -// _bss_table_end from linker script (see LSP Ref Manual) - -.type main, @function - -.type primary_core_data, @object -.type core_data_ptr, @object - -// Macros to abstract away ABI differences - -#if __XTENSA_CALL0_ABI__ -# define CALL call0 -# define CALLX callx0 -#else -# define CALL call4 -# define CALLX callx4 -#endif - -/**************************************************************************/ - - .text - .align 4 - -_start: -#if CONFIG_MULTICORE - // each core unpacks xtos structures for itself - // nevertheless core 0 initializes shared xtosstruct - get_prid a5 - movi a4, PLATFORM_PRIMARY_CORE_ID - bne a5, a4, xtos_per_core -#endif - - // _start is typically NOT at the beginning of the text segment -- - // it is always called from either the reset vector or other code - // that does equivalent initialization (such as crt0-app.S). - // - // Assumptions on entry to _start: - // - low (level-one) and medium priority interrupts are disabled - // via PS.INTLEVEL and/or INTENABLE (PS.INTLEVEL is expected to - // be zeroed, to potentially enable them, before calling main) - // - C calling context not initialized: - // - PS not initialized - // - SP not initialized - // - the following are initialized: - // - LITBASE, cache attributes, WindowBase, WindowStart, - // CPENABLE, FP's FCR and FSR, EXCSAVE[n] - - // Keep a0 zero. It is used to initialize a few things. - // It is also the return address, where zero indicates - // that the frame used by _start is the bottommost frame. - // -#if !XCHAL_HAVE_HALT || !XCHAL_HAVE_BOOTLOADER // not needed for Xtensa TX - movi a0, 0 // keep this register zero. -#endif - -#if XTOS_RESET_UNNEEDED && !XCHAL_HAVE_HALT -#include "reset-unneeded.S" -#endif - -#if !CONFIG_MULTICORE - // Init xtos struct ptr - movi a2, primary_core_data - movi a3, core_data_ptr - s32i a2, a3, 0 -#else - // Init xtos struct ptr - movi a2, 4 - mull a2, a2, a5 - movi a3, core_data_ptr - add a3, a3, a2 - movi a2, primary_core_data - s32i a2, a3, 0 - - // Initialize the stack pointer. - // See the "ABI and Software Conventions" chapter in the - // Xtensa ISA Reference manual for details. - - // NOTE: Because the _start routine does not use any memory in its - // stack frame, and because all of its CALL instructions use a - // window size of 4 (or zero), the stack frame for _start can be empty. - -// Common xtos structures used by all cores. -xtos_common: - // Unpack xtos_exc_handler_table from rodata. - movi a6, xtos_exc_handler_table - movi a9, xtos_exc_handler_table_r - movi a13, XCHAL_EXCCAUSE_NUM*4 -xtos_exc_handler_table_loop: - l32i a12, a9, 0 - s32i a12, a6, 0 - addi a13, a13, -4 - addi a6, a6, 4 - addi a9, a9, 4 - bnez a13, xtos_exc_handler_table_loop - -// Xtos structures initialized per core from rodata. 
-xtos_per_core: -#endif -#if CONFIG_XT_BOOT_LOADER - .weak _Level2FromVector - .weak _Level3FromVector - .weak _Level4FromVector - .weak _Level5FromVector - - movi a4, _Level2FromVector - writesr excsave 2 a4 - movi a4, _Level3FromVector - writesr excsave 3 a4 - movi a4, _Level4FromVector - writesr excsave 4 a4 -#if XCHAL_INTLEVEL5_MASK - movi a4, _Level5FromVector - writesr excsave 5 a4 -#endif -#endif - -#if CONFIG_MULTICORE - get_prid a5 - movi a4, PLATFORM_PRIMARY_CORE_ID - beq a5, a4, xtos_per_core_obtain_xtos_structs - -xtos_per_core_cacheattr: -#if XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || \ - XCHAL_HAVE_XLT_CACHEATTR || \ - (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - movi a2, _memmap_cacheattr_reset /* absolute symbol, not a ptr */ - cacheattr_set /* set CACHEATTR from a2 (clobbers a3-a8) */ -#endif - -xtos_per_core_vecbase: -#if XCHAL_HAVE_VECBASE - /* note: absolute symbol, not a ptr */ - movi a2, _memmap_vecbase_reset - wsr.vecbase a2 -#endif - -#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? */ -# if XCHAL_DCACHE_IS_COHERENT - movi a3, 0x25 /* MX -- internal for writeback, RCW otherwise */ -# else - movi a3, 0x15 /* non-MX -- always RCW */ -# endif - wsr.atomctl a3 -#endif - -// Obtain core structs from given address. -xtos_per_core_obtain_xtos_structs: - get_prid a5 - movi a2, 4 - mull a2, a2, a5 - movi a3, core_data_ptr - add a3, a3, a2 - l32i a3, a3, 0 - -xtos_per_core_init_intstruct: - movi a2, 0 - s32i a2, a3, 0 - movi a2, 0xFFFFFFFF - s32i a2, a3, 4 - addi a3, a3, XTOS_INTSTRUCT_SIZE_PER_CORE - -xtos_per_core_init_interrupt_table: - // Setup iterator. - movi a6, XCHAL_NUM_INTERRUPTS - - // Setup init constants. - movi a2, xtos_unhandled_interrupt - - 1: - addi a6, a6, -1 - s32i a2, a3, 0 - s32i a6, a3, 4 - // Int handler size should be 8. - addi a3, a3, 8 - bnez a6, 1b - -xtos_per_core_init_interrupt_mask: - movi a6, XCHAL_NUM_INTERRUPTS - movi a5, ~XCHAL_LOWPRI_MASK - movi a7, -2 - - 2: - // i <==> a6 - addi a6, a6, -1 - // a4 := 1 << i - movi a4, 1 - ssl a6 - sll a4, a4 - // a2 := (-2 * (1 << i) - 1) | ~XCHAL_LOWPRI_MASK - mull a2, a4, a7 - addi a2, a2, -1 - or a2, a2, a5 - - s32i a2, a3, 0 - s32i a4, a3, 4 - // Int handler size should be 8. - addi a3, a3, 8 - addi a4, a4, 1 - bnez a6, 2b - - // Assign stack ptr before PS is initialized to avoid any debugger - // side effects and prevent from double exception. - xtos_stack_addr_percore sp, a3, _stack_sentry, _sof_core_s_start, SOF_STACK_SIZE -#else /* CONFIG_MULTICORE */ - movi sp, __stack -#endif - - /* - * Now that sp (a1) is set, we can set PS as per the application - * (user vector mode, enable interrupts, enable window exceptions if applicable). - */ -#if XCHAL_HAVE_EXCEPTIONS - movi a3, PS_UM|PS_WOE_ABI // PS.WOE = 0|1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 - wsr.ps a3 - rsync -#endif - - /* - * Do any initialization that affects the memory map, such as - * setting up TLB entries, that needs to be done before we can - * successfully clear BSS (e.g. if some BSS segments are in - * remapped areas). - * - * NOTE: This hook works where the reset vector does not unpack - * segments (see "ROM packing" in the LSP manual), or where - * unpacking of segments is not affected by memory remapping. - * If ROM unpacking is affected, TLB setup must be done in - * assembler from the reset vector. - * - * The __memmap_init() routine can be a C function, however it - * does not have BSS initialized! In particular, __memmap_init() - * cannot set BSS variables, i.e. 
uninitialized global variables - * (they'll be wiped out by the following BSS clear), nor can it - * assume they are yet initialized to zero. - * - * The __memmap_init() function is optional. It is marked as a - * weak symbol, so that it gets valued zero if not defined. - */ - .weak __memmap_init - movi a4, __memmap_init - beqz a4, 1f - CALLX a4 -1: - -/* bootloader takes care of zeroing BSS */ -#if !CONFIG_XT_BOOT_LOADER - /* - * Clear the BSS (uninitialized data) segments. - * This code supports multiple zeroed sections (*.bss). - * - * Register allocation: - * a0 = 0 - * a6 = pointer to start of table, and through table - * a7 = pointer to end of table - * a8 = start address of bytes to be zeroed - * a9 = end address of bytes to be zeroed - * a10 = length of bytes to be zeroed - */ - movi a0, 0 - movi a6, _bss_table_start - movi a7, _bss_table_end - bgeu a6, a7, .L3zte - -.L0zte: l32i a8, a6, 0 // get start address, assumed multiple of 4 - l32i a9, a6, 4 // get end address, assumed multiple of 4 - addi a6, a6, 8 // next entry - sub a10, a9, a8 // a10 = length, assumed a multiple of 4 - bbci.l a10, 2, .L1zte - s32i a0, a8, 0 // clear 4 bytes to make length multiple of 8 - addi a8, a8, 4 -.L1zte: bbci.l a10, 3, .L2zte - s32i a0, a8, 0 // clear 8 bytes to make length multiple of 16 - s32i a0, a8, 4 - addi a8, a8, 8 -.L2zte: srli a10, a10, 4 // length is now multiple of 16, divide by 16 - floopnez a10, clearzte - s32i a0, a8, 0 // clear 16 bytes at a time... - s32i a0, a8, 4 - s32i a0, a8, 8 - s32i a0, a8, 12 - addi a8, a8, 16 - floopend a10, clearzte - - bltu a6, a7, .L0zte // loop until end of table of *.bss sections -.L3zte: -#endif - - // Call: int main(int argc, char ** argv, char ** environ); - CALL main - // Does not return here. - - .data - // Mark argc/argv/envp parameters as weak so that an external - // object file can override them. - .text - - .size _start, . - _start diff --git a/src/arch/xtensa/xtos/crt1-sim.S b/src/arch/xtensa/xtos/crt1-sim.S deleted file mode 100644 index 90687927449f..000000000000 --- a/src/arch/xtensa/xtos/crt1-sim.S +++ /dev/null @@ -1,290 +0,0 @@ -// crt1-sim.S -// For the Xtensa simulator target, this code sets up the C calling context -// and calls main() (via __clibrary_start). -// Control arrives here at _start from the reset vector or from crt0-app.S. - -// Copyright (c) 1998-2017 Cadence Design Systems, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -#include -#include -#include -#include "xtos-internal.h" - - -// Exports -.global _start - -// Imports -// __clibrary_init from C library (eg. newlib or uclibc) -// exit from C library -// main from user application -// __stack from linker script (see LSP Ref Manual) - -.type __clibrary_init, @function -.type main, @function -.type exit, @function - - -// Macros to abstract away ABI differences - -#if __XTENSA_CALL0_ABI__ -# define CALL call0 -# define CALLX callx0 -# define ARG1 a2 /* 1st outgoing call argument */ -# define ARG2 a3 /* 2nd outgoing call argument */ -# define ARG3 a4 /* 3rd outgoing call argument */ -# define ARG4 a5 /* 4th outgoing call argument */ -# define ARG5 a6 /* 5th outgoing call argument */ -#else -# define CALL call4 -# define CALLX callx4 -# define ARG1 a6 /* 1st outgoing call argument */ -# define ARG2 a7 /* 2nd outgoing call argument */ -# define ARG3 a8 /* 3rd outgoing call argument */ -# define ARG4 a9 /* 4th outgoing call argument */ -# define ARG5 a10 /* 5th outgoing call argument */ -#endif - - .data - .weak _start_envp // allow overriding - .align 4 -_start_envp: .word 0 // empty environ - - - - .text - .align 4 - -_start: - // _start is typically NOT at the beginning of the text segment -- - // it is always called from either the reset vector or other code - // that does equivalent initialization (such as crt0-app.S). - // - // Assumptions on entry to _start: - // - low (level-one) and medium priority interrupts are disabled - // via PS.INTLEVEL and/or INTENABLE (PS.INTLEVEL is expected to - // be zeroed, to potentially enable them, before calling main) - // - C calling context not initialized: - // - PS not initialized - // - SP not initialized - // - the following are initialized: - // - LITBASE, cache attributes, WindowBase, WindowStart, - // CPENABLE, FP's FCR and FSR, EXCSAVE[n] - - // Keep a0 zero. It is used to initialize a few things. - // It is also the return address, where zero indicates - // that the frame used by _start is the bottommost frame. - // - movi a0, 0 // keep this register zero. - -#if XTOS_RESET_UNNEEDED -#include "reset-unneeded.S" -#endif - - - // Initialize the stack pointer. - // See the "ABI and Software Conventions" chapter in the - // Xtensa ISA Reference manual for details. - - // NOTE: Because the _start routine does not use any memory in its - // stack frame, and because all of its CALL instructions use a - // window size of 4, the stack frame for _start can be empty. - movi sp, __stack - - // reserve stack space for - // - argv array - // - argument strings - movi a2, SYS_iss_argv_size - simcall // returns size of argv[] + its strings in a2 -#if XCHAL_HAVE_PIF - // The stack only needs 16-byte alignment. - // However, here we round up the argv size further to 128 byte multiples - // so that in most cases, variations in argv[0]'s path do not result in - // different stack allocation. Otherwise, such variations can impact - // execution timing (eg. due to cache effects etc) for the same code and data. - // If we have a PIF, it's more likely the extra required space is okay. - addi a2, a2, 127 - srli a2, a2, 7 - slli a2, a2, 7 -#else - // Keep stack 16-byte aligned. 
- addi a2, a2, 15 - srli a2, a2, 4 - slli a2, a2, 4 -#endif - // No need to use MOVSP because we have no caller (we're the - // base caller); in fact it's better not to use MOVSP in this - // context, to avoid unnecessary ALLOCA exceptions and copying - // from undefined memory: - // sub a3, sp, a2 - // movsp sp, a3 - sub sp, sp, a2 - - - /* - * Now that sp (a1) is set, we can set PS as per the application - * (user vector mode, enable interrupts, enable window exceptions if applicable). - */ -#if XCHAL_HAVE_EXCEPTIONS - movi a3, PS_UM|PS_WOE_ABI // PS.WOE = 0|1, PS.UM = 1, PS.EXCM = 0, PS.INTLEVEL = 0 - wsr.ps a3 - rsync -#endif - - - /* - * Do any initialization that affects the memory map, such as - * setting up TLB entries, that needs to be done before we can - * successfully clear BSS (e.g. if some BSS segments are in - * remapped areas). - * - * NOTE: This hook works where the reset vector does not unpack - * segments (see "ROM packing" in the LSP manual), or where - * unpacking of segments is not affected by memory remapping. - * If ROM unpacking is affected, TLB setup must be done in - * assembler from the reset vector. - * - * The __memmap_init() routine can be a C function, however it - * does not have BSS initialized! In particular, __memmap_init() - * cannot set BSS variables, i.e. uninitialized global variables - * (they'll be wiped out by the following BSS clear), nor can it - * assume they are yet initialized to zero. - * - * The __memmap_init() function is optional. It is marked as a - * weak symbol, so that it gets valued zero if not defined. - */ - .weak __memmap_init - movi a4, __memmap_init - beqz a4, 1f - CALLX a4 -1: - -#if !XCHAL_HAVE_BOOTLOADER /* boot loader takes care of zeroing BSS */ - - /* If a system-specific BSS init routine is defined, call it. - * Such a routine must be named __bss_init(). It can be a C - * function, however it must be written to be able to work - * with BSS not yet initialized. This function is optional. - * It is marked as a weak symbol, so that it gets value zero - * if not defined. - */ - .weak __bss_init - movi a4, __bss_init - beqz a4, 2f - movi ARG1, _bss_table_start - movi ARG2, _bss_table_end - CALLX a4 - j .Lnobss // skip default BSS init code -2: - - /* The new ISS simcall only appeared after RB-2007.2: */ -#if (XCHAL_HW_MAX_VERSION > XTENSA_HWVERSION_RB_2007_2) - /* - * Clear the BSS (uninitialized data) segments. - * This code supports multiple zeroed sections (*.bss). - * For speed, we clear memory using an ISS simcall - * (see crt1-boards.S for more generic BSS clearing code). - */ - movi a6, _bss_table_start - movi a7, _bss_table_end - bgeu a6, a7, .Lnobss -.Lbssloop: - movi a2, SYS_memset - l32i a3, a6, 0 // arg1 = fill start address - movi a4, 0 // arg2 = fill pattern - l32i a5, a6, 4 // get end address - addi a6, a6, 8 // next bss table entry - sub a5, a5, a3 // arg3 = fill size in bytes - simcall // memset(a3,a4,a5) - bltu a6, a7, .Lbssloop // loop until end of bss table -#endif /* XCHAL_HW_MAX_VERSION */ -.Lnobss: -#endif /* XCHAL_HAVE_BOOTLOADER */ - - - /* - * Call __clibrary_init to initialize the C library: - * - * void __clibrary_init(int argc, char ** argv, char ** environ, - * void(*init_func)(void), void(*fini_func)(void)); - */ - - // Get argv with the arguments from the ISS - mov a3, sp // tell simcall where to write argv[] - movi a2, SYS_iss_set_argv - simcall // write argv[] array at a3 - - movi a2, SYS_iss_argc - simcall // put argc in a2 - - -// Alternative smaller code for Xtensa TX. 
-// Many starting with simulation assume a full C env, so NOT DONE FOR NOW. -// -//#if XCHAL_HAVE_HALT -// -// // Assume minimalist environment for memory-constrained TX cores. -// // No C library or board initialization, and no call to exit(). -// // However, in the interest of software regressions, for now we -// // still pass parameters to main (but not the rarely used envp). -// -// //mov ARG1, a2 // argc already in a2. -// mov ARG2, sp // argv -// CALL main -// halt -// -//#else /* !HALT */ -// ... - - -#if __XTENSA_CALL0_ABI__ - mov a12, a2 // save argc (a2 is ARG1) -#else - mov ARG1, a2 // argc -#endif - mov ARG2, sp // argv - movi ARG3, _start_envp // envp - movi ARG4, _init // _init - movi ARG5, _fini // _fini - CALL __clibrary_init - - // Call: int main(int argc, char ** argv, char ** environ); -#if __XTENSA_CALL0_ABI__ - mov ARG1, a12 // argc -#else - mov ARG1, a2 // argc -#endif - mov ARG2, sp // argv - movi ARG3, _start_envp // envp = [0] - CALL main - // The return value is the same register as the first outgoing argument. - CALL exit // exit with main's return value - // Does not return here. - - .size _start, . - _start - - -// Local Variables: -// mode:fundamental -// comment-start: "// " -// comment-start-skip: "// *" -// End: diff --git a/src/arch/xtensa/xtos/crt1-tiny.S b/src/arch/xtensa/xtos/crt1-tiny.S deleted file mode 100644 index d1684c5e76ad..000000000000 --- a/src/arch/xtensa/xtos/crt1-tiny.S +++ /dev/null @@ -1,127 +0,0 @@ -// crt1-tiny.S -// -// This is a reduced version of the code in crt1-boards.S . -// For most hardware / boards, this code sets up the C calling context -// (setting up stack, PS, and clearing BSS) and calls main(). -// It has some limitations (see LSP Ref Manual for details) such as: -// - does not setup the C library (...) -// - does not call C++ static constructors and destructors -// - only clears .bss , not other *.bss sections -// -// Control arrives here at _start from the reset vector or from crt0-app.S. - -// Copyright (c) 1998-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -#include -#include -#include - - -// Imports -// __stack from linker script (see LSP Ref Manual) -// _bss_start from linker script (see LSP Ref Manual) -// _bss_end from linker script (see LSP Ref Manual) -// main from user application - - - -#ifdef __XTENSA_CALL0_ABI__ -# define CALL call0 -#else -# define CALL call4 -#endif - - -/**************************************************************************/ - - .text - .align 4 - .global _start -_start: - // _start is typically NOT at the beginning of the text segment -- - // it is always called from either the reset vector or other code - // that does equivalent initialization (such as crt0-app.S). - // See crt1-boards.S for assumptions on entry to _start , - // and for comments on what is being done in this file. - -#if !XCHAL_HAVE_HALT || !XCHAL_HAVE_BOOTLOADER // not needed for Xtensa TX - movi a0, 0 // mark base of call stack -#endif - - movi sp, __stack // setup the stack - -#if XCHAL_HAVE_EXCEPTIONS - movi a3, PS_UM|PS_WOE_ABI // PS: WOE=0|1, UM=1, EXCM=0, INTLEVEL=0 - wsr.ps a3 // setup PS for the application - rsync -#endif - - - // Clear the BSS (uninitialized data) segment. - // - // This code only supports .bss, not multiple *.bss sections. - // Corresponding code in crt1-boards.S does, and is faster but bigger. - -#if !XCHAL_HAVE_BOOTLOADER - movi a6, _bss_start - movi a7, _bss_end - bgeu a6, a7, 2f -1: s32i a0, a6, 0 - addi a6, a6, 4 - bltu a6, a7, 1b -2: -#endif - - // We can now call C code, the C calling environment is initialized. - // This tiny C runtime assumes main is declared as "void main(void)" - // rather than with the usual argc,argv. So there are no arguments. - - CALL main - - // In this tiny C runtime, main() is not expected to return. - // If it does, just loop forever. - - //CALL xthal_dcache_all_writeback // sync dirty dcaches to memory - //extw // sync TIE queues/ports/etc (LX or later only) - -.L0: -#if XCHAL_HAVE_HALT - halt -#else -# if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2013_2 /* SIMCALL is NOP in hw? */ - movi a2, SYS_exit - simcall // exit if in simulator, else NOP -# endif -# if XCHAL_HAVE_DEBUG - break 1, 15 // give control to debugger -# endif -#endif - j .L0 - - .size _start, . - _start - - -// Local Variables: -// mode:fundamental -// comment-start: "// " -// comment-start-skip: "// *" -// End: diff --git a/src/arch/xtensa/xtos/debug-vector.S b/src/arch/xtensa/xtos/debug-vector.S deleted file mode 100644 index cf16dcecaec3..000000000000 --- a/src/arch/xtensa/xtos/debug-vector.S +++ /dev/null @@ -1,73 +0,0 @@ -// debug-vector.S -- Debug Exception Vector -// $Id: //depot/rel/Eaglenest/Xtensa/OS/xtos/debug-vector.S#2 $ - -// Copyright (c) 2003-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -#include -#include -#include -#ifdef SIMULATOR -#include -#endif - -#if XCHAL_HAVE_DEBUG && XCHAL_HAVE_EXCEPTIONS - - // This code goes at the debug exception vector - - .begin literal_prefix .DebugExceptionVector - .section .DebugExceptionVector.text, "ax" - .align 4 - .global _DebugExceptionVector -_DebugExceptionVector: - - isync_erratum453 -#if ((defined(SIMULATOR) || \ - (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2013_2)) \ - && !CONFIG_GDB_DEBUG) /* SIMCALL is NOP in hw? */ - // In the simulator (ISS), let the debugger (if any is attached) - // handle the debug exception, else simply stop the simulation: - // - simcall // have ISS handle the debug exception -# endif -# if (!defined(SIMULATOR) && !CONFIG_GDB_DEBUG) - // For hardware, this code does not handle debug exceptions. - // To implement a target-side debug monitor, replace this - // vector with a real one that uses target-specific facilities - // to communicate with the debugger. - // -1: - // unexpected debug exception, loop in low-power mode - //waiti XCHAL_DEBUGLEVEL - j 1b // infinite loop - unexpected debug exception -# endif /*!SIMULATOR && !CONFIG_GDB_DEBUG*/ - -#if CONFIG_GDB_DEBUG - xsr a2, DEBUG_EXCSAVE - jx a2 -#endif - - .end literal_prefix - .size _DebugExceptionVector, . - _DebugExceptionVector - -#endif /* XCHAL_HAVE_DEBUG && XCHAL_HAVE_EXCEPTIONS */ diff --git a/src/arch/xtensa/xtos/deprecated.S b/src/arch/xtensa/xtos/deprecated.S deleted file mode 100644 index db746dbfa3aa..000000000000 --- a/src/arch/xtensa/xtos/deprecated.S +++ /dev/null @@ -1,122 +0,0 @@ -// deprecated.S -- Deprecated assembler functions - -// Copyright (c) 2003-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2 - - .text - -/* - void _xtos_timer__delta(int cycles) - - Set the timer from the current ccount. - DEPRECATED. PLEASE USE xthal_set_ccompare() AND xthal_get_ccount() - INSTEAD, OR USE DIFFERENT ALGORITHM THAT UPDATES CCOMPAREn RELATIVE TO - LAST CCOMPAREn FOR DRIFT-FREE PERIODIC TIMER INTERRUPTS. -*/ - .macro define_timer_delta num, numtimers - .if ((\num-\numtimers) & ~0xFFF) // num < numtimers ? 
- .align 4 - .global _xtos_timer_&num&_delta - .type _xtos_timer_&num&_delta,@function -_xtos_timer_&num&_delta: - abi_entry - rsr.ccount a3 - add a3, a3, a2 - writesr ccompare \num a3 - abi_return - .size _xtos_timer_&num&_delta, . - _xtos_timer_&num&_delta - .endif - .endm - - - -#if defined(__SPLIT__t0_delta) - - define_timer_delta 0, XCHAL_NUM_TIMERS - -#elif defined(__SPLIT__t1_delta) - - define_timer_delta 1, XCHAL_NUM_TIMERS - -#elif defined(__SPLIT__t2_delta) - - define_timer_delta 2, XCHAL_NUM_TIMERS - -#elif defined(__SPLIT__t3_delta) - - define_timer_delta 3, XCHAL_NUM_TIMERS - - -#elif defined(__SPLIT__read_ints) - - // - // u32 _xtos_read_ints( void ) - // - // _xtos_read_ints() reads the INTERRUPT register and returns it. - // DEPRECATED. - // [Kept temporarily because it was documented in T1050 System SW Ref Manual.] - // - .text - .align 4 - .global _xtos_read_ints - .type _xtos_read_ints,@function -_xtos_read_ints: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - rsr.interrupt a2 -#else /*XCHAL_HAVE_INTERRUPTS*/ - movi a2, 0 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - .size _xtos_read_ints, . - _xtos_read_ints - - -#elif defined(__SPLIT__clear_ints) - - // - // void _xtos_clear_ints( u32 mask ) - // - // _xtos_clear_ints() clears selected bits of the INTERRUPT register. - // DEPRECATED. - // [Kept temporarily because it was documented in T1050 System SW Ref Manual.] - // - .text - .align 4 - .global _xtos_clear_ints - .type _xtos_clear_ints,@function -_xtos_clear_ints: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - wsr.intclear a2 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - .size _xtos_clear_ints, . - _xtos_clear_ints - -#endif /* splitting */ - -#endif /* XEA1 or XEA2 */ - diff --git a/src/arch/xtensa/xtos/double-vector.S b/src/arch/xtensa/xtos/double-vector.S deleted file mode 100644 index f52e38462e4f..000000000000 --- a/src/arch/xtensa/xtos/double-vector.S +++ /dev/null @@ -1,98 +0,0 @@ -// double-vector.S -- Double Exception Vector -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/double-vector.S#1 $ - -// Copyright (c) 2000-2004, 2006, 2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#ifdef SIMULATOR -#include -#endif - - -#if XCHAL_HAVE_EXCEPTIONS && defined(XCHAL_DOUBLEEXC_VECTOR_VADDR) - -/* - * This is a very minimalist implementation of the double - * exception vector. 
For XEA2 configurations without a - * full MMU, this vector is only expected to be executed - * upon fatal errors (exceptions that occur within critical - * sections of exception vectors and handlers). - * - * For configurations with a full MMU (ie. with TLBs and - * auto-refill) and the windowed address registers option, - * a more complete version of this handler is necessary if: - * dynamic page mapping is implemented, and the stack - * can ever point to a dynamically mapped area. - * In this case, a double exception is a normal occurrence - * when a stack access within a window handler causes - * a TLB miss exception or other expected MMU fault. - * XTOS does not support this scenario, hence a minimalist - * double exception vector is sufficient. - */ - - .begin literal_prefix .DoubleExceptionVector - .section .DoubleExceptionVector.text, "ax" - - .align 4 - .global _DoubleExceptionVector -_DoubleExceptionVector: - -# if XCHAL_HAVE_DEBUG -1: break 1,4 // unhandled double exception -# elif defined(SIMULATOR) - wsr.excsave1 a2 // save a2 where simulator expects it - movi a2, SYS_unhandled_double_exc -1: simcall // let simulator/debugger report unhandled exception -# else -1: -# endif - j 1b // infinite loop - - // NOTE: a non-minimalist vector may choose to - // process the double exception in the vector itself - // (by default, much more space is allocated to double - // exception vectors than to most other vectors); - // or, to jump to a double exception handler located - // elsewhere. If only the normal case of double - // exceptions occurring within a window handler is - // being handled, then it is safe to use EXCSAVE_1 to - // do this jump (window handlers don't use EXCSAVE_1). - // For example: - // - // wsr.excsave1 a0 - // movi a0, _DoubleExceptionFromVector - // jx a0 - // - // .text - // .align 4 - // .global _DoubleExceptionFromVector - //_DoubleExceptionFromVector: - // ... - - - .size _DoubleExceptionVector, . - _DoubleExceptionVector - .text - .end literal_prefix - -#endif /* have double exceptions */ - diff --git a/src/arch/xtensa/xtos/exc-sethandler.c b/src/arch/xtensa/xtos/exc-sethandler.c deleted file mode 100644 index 9df2eff8ea89..000000000000 --- a/src/arch/xtensa/xtos/exc-sethandler.c +++ /dev/null @@ -1,70 +0,0 @@ - -/* exc-sethandler.c - register an exception handler in XTOS */ - -/* - * Copyright (c) 1999-2017 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_EXCEPTIONS - -extern void xtos_c_wrapper_handler(void *arg); /* assembly-level wrapper for C handlers */ -extern void xtos_unhandled_exception(void *arg); /* assembly-level handler for exceptions - with no registered handler */ -extern void xtos_p_none(void *arg); /* default/empty C handler */ - - -extern _xtos_handler xtos_c_handler_table[XCHAL_EXCCAUSE_NUM]; -extern _xtos_handler xtos_exc_handler_table[XCHAL_EXCCAUSE_NUM]; - -/* - * Register a C handler for the specified general exception - * (specified EXCCAUSE value). - */ -_xtos_handler _xtos_set_exception_handler( int n, _xtos_handler f ) -{ - _xtos_handler ret = 0; - - if( n < XCHAL_EXCCAUSE_NUM ) { - _xtos_handler func = f; - - if( func == 0 ) { - func = &xtos_p_none; - } - ret = xtos_c_handler_table[n]; - xtos_exc_handler_table[n] = ( (func == &xtos_p_none) - ? &xtos_unhandled_exception - : &xtos_c_wrapper_handler ); - xtos_c_handler_table[n] = func; - if( ret == &xtos_p_none ) { - ret = 0; - } - } - - return ret; -} - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/exc-syscall-handler.S b/src/arch/xtensa/xtos/exc-syscall-handler.S deleted file mode 100644 index 9c647ce22a32..000000000000 --- a/src/arch/xtensa/xtos/exc-syscall-handler.S +++ /dev/null @@ -1,192 +0,0 @@ -/* exc-syscall-handler.S - XTOS syscall instruction handler */ - -/* - * Copyright (c) 1999-2010 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The SYSCALL instruction is typically used to implement system calls. - * By convention, register a2 identifies the requested system call. - * Typically, other parameters are passed in registers a3 and up, - * and results are returned in a2. - * - * The Xtensa windowed ABI reserves the value zero of register a2 - * as a request to force register windows to the stack. The call0 ABI, - * which has no equivalent operation, reserves this value as a no-op. - * - * Generally, only code that traverses the stack in unusual ways needs - * to force (spill) register windows to the stack. In generic C or C++, - * there are four cases, and they all use the standard SYSCALL mechanism: - * - * 1. C++ exceptions - * 2. setjmp and longjmp - * 3. functions using the GNU extension "__builtin_return_address" - * 4. 
functions using the GNU extension "nonlocal goto" - * - * NOTE: Specific operating systems often need to spill register windows - * to the stack in other situations such as context-switching, passing - * Unix-like signals to threads, displaying stack tracebacks, etc. - * They may choose to use the SYSCALL mechanism to do so, or use other - * means such as calling xthal_window_spill() or other methods. - * - * If you want to handle other system calls, you can modify this file, or - * use the C version of it in exc-syscall-handler.c . The Xtensa ABIs only - * define system call zero; the behavior of other system calls is up to you. - */ - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_EXCEPTIONS - -//Vector: -// addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. -// s32i a2, a1, UEXC_a2 -// s32i a3, a1, UEXC_a3 -// movi a3, xtos_exc_handler_table -// rsr.exccause a2 -// addx4 a2, a2, a3 -// l32i a2, a2, 0 -// s32i a4, a1, UEXC_a4 -// jx a2 // jump to cause-specific handler - - .global _need_user_vector_ // pull-in real user vector (tiny LSP) - - - /* - * The SYSCALL handler is entered when the processor - * executes the SYSCALL instruction. - * By convention, the system call to execute is specified in a2. - */ - .text - .align 4 - .global _xtos_syscall_handler -_xtos_syscall_handler: - // HERE: a2, a3, a4 have been saved to the exception stack frame allocated with a1 (sp). - // We ignore that a4 was saved, we don't clobber it. - - rsr.epc1 a3 -#if XCHAL_HAVE_LOOPS - // If the SYSCALL instruction was the last instruction in the body of - // a zero-overhead loop, and the loop will execute again, decrement - // the loop count and resume execution at the head of the loop: - // - rsr.lend a2 - addi a3, a3, 3 // increment EPC to skip the SYSCALL instruction - bne a2, a3, 1f - rsr.lcount a2 - beqz a2, 1f - addi a2, a2, -1 - wsr.lcount a2 - rsr.lbeg a3 -1: l32i a2, a1, UEXC_a2 // get the system call number -#else - // No loop registers. - l32i a2, a1, UEXC_a2 // get the system call number - addi a3, a3, 3 // increment EPC to skip the SYSCALL instruction -#endif - wsr.epc1 a3 // update EPC1 past SYSCALL - l32i a3, a1, UEXC_a3 // restore a3 - // If you want to handle other system calls, check a2 here. - -#ifdef __XTENSA_WINDOWED_ABI__ - bnez a2, .Lnotzero // is syscall number zero? - - /* Spill register windows to the stack. */ - - // Save a2 thru a5 in the nested-C-function area, where an interrupt - // won't clobber them. The pseudo-CALL's ENTRY below clobbers a4 and a5. - //s32i a2, a1, (ESF_TOTALSIZE - 32) + 0 // a2 is zero, no need to save - s32i a3, a1, (ESF_TOTALSIZE - 32) + 4 - s32i a4, a1, (ESF_TOTALSIZE - 32) + 8 - s32i a5, a1, (ESF_TOTALSIZE - 32) + 12 - - movi a3, PS_WOE|PS_CALLINC(1)|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) // CALL4 emulation - rsr.ps a2 // save PS in a2 - wsr.ps a3 // PS.INTLEVEL=EXCMLEVEL (1 for XEA1) - // HERE: window overflows enabled but NOT SAFE yet, touch only a0..a3 until it's safe. - rsr.epc1 a3 // save EPC1 in a3 - addi a1, a1, ESF_TOTALSIZE // restore sp (dealloc ESF) for sane stack again - rsync // wait for WSR to PS to complete - // HERE: Window overflows and interrupts are safe, we saved EPC1 and - // restored a1, and a4-a15 are unmodified. - // Pseudo-CALL: make it look as if the code that executed SYSCALL - // made a CALL4 to here. See user exc. handler comments for details. 
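The comment block above names two ways for C code to force live register windows out to the stack before walking it: executing SYSCALL with a2 == 0, or calling the HAL routine xthal_window_spill() that the comment itself mentions. As a rough illustration of the HAL route (not code from this tree), assuming the usual <xtensa/hal.h> prototype and a hypothetical walk_stack() consumer:

    #include <stdint.h>
    #include <xtensa/hal.h>          /* assumed home of xthal_window_spill() */

    /* Hypothetical consumer that walks the saved a0/a1 pairs on the stack. */
    extern void walk_stack(const uint32_t *sp);

    void dump_backtrace(void)
    {
            /* Flush every live register window to its stack save area so the
             * whole call chain becomes visible in memory; this is the same
             * effect the SYSCALL-0 path above achieves with its pseudo-CALL4
             * and deep-call sequence.
             */
            xthal_window_spill();

            walk_stack((const uint32_t *)__builtin_frame_address(0));
    }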
- // ENTRY cannot cause window overflow; touch a4 to ensure a4-a7 - // overflow if needed: - movi a4, 0 // clears pseudo-CALL's return PC - // NOTE: On XEA1 processors, return from window overflow re-enables - // interrupts (by clearing PS.INTLEVEL). This is okay even though SP - // is unallocated because we saved state safe from interrupt dispatch. - .global _SyscallException -_SyscallException: // label makes tracebacks look nicer - _entry a1, 64 // as if after a CALL4 (PS.CALLINC==1) - // Call deep enough to force spill of entire address register file. - _call12 __deep_call -1: movi a14, 0x80000000 + .Ldelta_done - add a0, a12, a14 // clear a0 msbit (per CALL4), offset -3: retw // return from pseudo-CALL4 - - // NOTE: a5 still contains the exception window's exception stack frame pointer. -.LMdon: wsr.ps a2 // for XEA2, this sets EXCM; for XEA1, this sets INTLEVEL to 1; ... - movi a2, 0 // indicate successful SYSCALL (?) - l32i a4, a5, 32 + 8 - rsync // complete WSR to PS for safe write to EPC1 - wsr.epc1 a3 - l32i a3, a5, 32 + 4 - l32i a5, a5, 32 + 12 - rfe_rfue - - .set .Ldelta_retw, (3b - 1b) - .set .Ldelta_done, (.LMdon - 1b) - - .align 4 - .local __deep_call -__deep_call: - entry a1, 48 -#if XCHAL_NUM_AREGS < 64 - mov a15, a15 // touch just far enough to overflow 32 -#else - movi a12, .Ldelta_retw // use movi/add because of relocation - add a12, a0, a12 // set return PC as per CALL12 - _entry a1, 48 // last call was call12 so PS.CALLINC==3 - mov a12, a0 // set return PC - _entry a1, 48 - mov a12, a0 // set return PC - _entry a1, 16 - mov a11, a11 // touch just far enough to overflow 64 -#endif - retw - -#endif /* __XTENSA_WINDOWED_ABI__ */ - -.Lnotzero: - movi a2, -1 /*ENOSYS*/ // system call not supported - addi a1, a1, ESF_TOTALSIZE - rfe_rfue - - .size _xtos_syscall_handler, . - _xtos_syscall_handler - - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/exc-table.S b/src/arch/xtensa/xtos/exc-table.S deleted file mode 100644 index a1281332e930..000000000000 --- a/src/arch/xtensa/xtos/exc-table.S +++ /dev/null @@ -1,61 +0,0 @@ -// exc-table.S - general exception C handler table - -// Copyright (c) 1999-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_EXCEPTIONS - - /* - * Table of exception handlers (in C) for user vectored exceptions. - * Provides entries for all possible 64 exception causes - * currently allowed for in the EXCCAUSE register. 
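The xtos_c_handler_table declared just below is what the deleted exc-sethandler.c above populates through _xtos_set_exception_handler(). A rough usage sketch (not code from this tree), with local declarations standing in for the real XTOS runtime header and an example cause number:

    /* Local declarations standing in for the XTOS runtime header. */
    typedef void (*_xtos_handler)(void *arg);
    extern _xtos_handler _xtos_set_exception_handler(int n, _xtos_handler f);

    #define MY_EXCCAUSE 3   /* example cause number; real code would use the
                               EXCCAUSE_* names from the core headers */

    static void my_exc_handler(void *arg)
    {
            (void)arg;      /* presumably the exception frame built by the
                               dispatcher; record or fix up the fault here */
    }

    static void install_exc_handler(void)
    {
            /* Returns the previously registered handler, or 0 if the slot
             * still held the default xtos_p_none placeholder; passing f == 0
             * restores the unhandled-exception behaviour (see the C source
             * above).
             */
            _xtos_handler prev =
                    _xtos_set_exception_handler(MY_EXCCAUSE, my_exc_handler);
            (void)prev;
    }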
- */ - .data - .global xtos_c_handler_table - .align 4 -xtos_c_handler_table: - .rept XCHAL_EXCCAUSE_NUM - .word xtos_p_none - .endr - - /* - * Default/empty exception C handler. - * This is just a placeholder for exception causes with no registered - * handler; it normally never gets executed. - * NOTE: control goes first to the debugger if one is present; - * see xtos_unhandled_exception in exc-unhandled.S . - */ - .text - .align 4 - .global xtos_p_none - .type xtos_p_none,@function -xtos_p_none: - abi_entry - // Do nothing. - abi_return - .size xtos_p_none, . - xtos_p_none - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/exc-unhandled.S b/src/arch/xtensa/xtos/exc-unhandled.S deleted file mode 100644 index 2f569c0c8e49..000000000000 --- a/src/arch/xtensa/xtos/exc-unhandled.S +++ /dev/null @@ -1,83 +0,0 @@ -// exc-unhandled.S - General Exception Handler for unhandled exceptions - -// Copyright (c) 2002-2004, 2006, 2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include "xtos-internal.h" -#ifdef SIMULATOR -#include -#endif - -#if XCHAL_HAVE_EXCEPTIONS - -/* - * Assembly-level exception handler used when no handler was - * registered for the specific exception cause. - * - * The BREAK instruction is used to give control to the debugger, - * if one is present and active. (If none is present and active, - * the default debug exception handler will typically skip over - * this break instruction.) - * This code follows the convention documented in the ISA manual - * to use BREAK instructions to signal unhandled exceptions to the - * debugger. For the debugger to report or handle this condition - * in an OS-independent manner, all processor state (except PC) - * must be restored as it was when the unhandled exception just - * occurred (ie. as it was at the beginning of the vector). - * If execution continues after the BREAK instruction (in which - * case any register might have been modified by the debugger), - * just return. - */ - .text - .align 4 - - // If not pulled-in some other way, define it as unhandled: - .weak xtos_cause3_handler - .global xtos_cause3_handler -xtos_cause3_handler: - - .global xtos_unhandled_exception -xtos_unhandled_exception: -#if XCHAL_HAVE_DEBUG || defined(SIMULATOR) - l32i a2, a1, UEXC_a2 // restore a2 - l32i a3, a1, UEXC_a3 // restore a3 - // Note: a4-a5 not clobbered, no need to restore. 
- addi a1, a1, ESF_TOTALSIZE // restore sp -# if XCHAL_HAVE_DEBUG - break 1, 1 // give control to the debugger (if any present) -# else - wsr.excsave1 a2 // save a2 where simulator expects it - movi a2, SYS_unhandled_user_exc - simcall // let simulator/debugger report unhandled exception - rsr.excsave1 a2 // restore a2 -# endif - rfe_rfue // if sim/debug resume, just return -#else /* DEBUG or SIMULATOR */ - j xtos_unhandled_exception // just loop forever -#endif /* DEBUG or SIMULATOR */ - - .size xtos_unhandled_exception, . - xtos_unhandled_exception - - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/exit.S b/src/arch/xtensa/xtos/exit.S deleted file mode 100644 index eaee4364312b..000000000000 --- a/src/arch/xtensa/xtos/exit.S +++ /dev/null @@ -1,93 +0,0 @@ -// exit.S -// -// For hardware / boards, this is the default _exit routine called by the -// C library exit() function. If the program ever exits, we eventually -// end up here after all C library cleanup (such as closing open files, -// calling exit callbacks and C++ destructors, etc) is complete. - -// Copyright (c) 1998-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include -#include "xtos-internal.h" - -// Macros to abstract away ABI differences -#if __XTENSA_CALL0_ABI__ -# define CALL call0 -#else -# define CALL call4 -#endif - - - .text - .align 4 - .global _exit - .type _exit, @function -_exit: - abi_entry 0, 4 - -#if __XTENSA_CALL0_ABI__ - // save exit code as cache writeback will clobber a2 in call0 - mov a12, a2 -#endif - - // sync dirty data to memory before terminating -#if XCHAL_DCACHE_IS_COHERENT - CALL xthal_cache_coherence_optout -#elif XCHAL_DCACHE_IS_WRITEBACK - CALL xthal_dcache_all_writeback -#endif - - // sync queues (if any, only for LX and later): -#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RA_2004_1 /* LX or later? */ - extw -#endif - // can break to debug monitor, go to sleep with waiti, or just spin in a loop -.L0: -#if XCHAL_HAVE_HALT - halt -#else -# if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RE_2013_2 /* SIMCALL is NOP in hw? 
*/ - // ISS expects exit code in a3 -# if __XTENSA_CALL0_ABI__ - mov a3, a12 -# else - mov a3, a2 -# endif - mov a4, a3 // save exitcode for the debugger, as simcall will erase a3 - movi a2, SYS_exit - simcall // exit if in simulator, else NOP - mov a2, a4 -# endif -# if XCHAL_HAVE_DEBUG - break 1, 15 // back to debugger, if one is attached -# endif -# if XCHAL_HAVE_INTERRUPTS - waiti 15 -# endif -#endif - j .L0 - //abi_exit - - .size _exit, . - _exit - diff --git a/src/arch/xtensa/xtos/int-handler.S b/src/arch/xtensa/xtos/int-handler.S deleted file mode 100644 index 3c86252c9aa5..000000000000 --- a/src/arch/xtensa/xtos/int-handler.S +++ /dev/null @@ -1,59 +0,0 @@ -// int-handler.S - Interrupt Handler Template (for levels > 1) -// $Id: //depot/main/Xtensa/OS/xtos/inth-template.S#1 $ - -// Copyright (c) 2003-2004, 2006 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -// To use this template file, define a macro called _INTERRUPT_LEVEL -// to be the interrupt priority level of the vector, then include this file. -// The default Makefile defines _INTERRUPT_LEVEL when assembling this file -// for each medium and high priority interrupt level. - - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_INTERRUPTS - -# if INTERRUPT_IS_HI(_INTERRUPT_LEVEL) - -# if _INTERRUPT_LEVEL > XTOS_LOCKLEVEL - /* Not safe to dispatch interrupts in C above XTOS_LOCKLEVEL, - * so default to assembly high-priority interrupt handlers template - * in this case (with the default XTOS_LOCKLEVEL this never happens): - */ -# include "int-highpri-template.S" -# else - /* Dispatch high-priority interrupt handlers in C: */ -# include "int-highpri-dispatcher.S" -# endif - -# elif INTERRUPT_IS_MED(_INTERRUPT_LEVEL) - -# include "int-medpri-dispatcher.S" - -# elif (_INTERRUPT_LEVEL <= XCHAL_NUM_INTLEVELS) && (_INTERRUPT_LEVEL != XCHAL_DEBUGLEVEL) -# error INTERNAL ERROR: Interrupt priority levels > 1 must be either hi or medium priority! -# endif - -#endif /* XCHAL_HAVE_INTERRUPTS */ diff --git a/src/arch/xtensa/xtos/int-highpri-dispatcher.S b/src/arch/xtensa/xtos/int-highpri-dispatcher.S deleted file mode 100644 index a3c919bd47fd..000000000000 --- a/src/arch/xtensa/xtos/int-highpri-dispatcher.S +++ /dev/null @@ -1,514 +0,0 @@ -// High-Priority Interrupt Dispatcher Template -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/int-highpri-dispatcher.S#1 $ - -// Copyright (c) 2004-2015 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// -// This file allows writing high-priority interrupt handlers in C, -// providing convenience at a significant cost in performance. -// -// By default, this file is included by inth-template.S . -// The default Makefile defines _INTERRUPT_LEVEL when assembling -// inth-template.S for each medium and high priority interrupt level. -// -// To use this template file, define a macro called _INTERRUPT_LEVEL -// to be the interrupt priority level of the vector, then include this file. - - -#if CONFIG_MULTICORE -#include -#endif -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_INTERRUPTS && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) - -#define INTERRUPT_MASK XCHAL_INTLEVEL_MASK(_INTERRUPT_LEVEL) -#define SINGLE_INTERRUPT ((INTERRUPT_MASK & (INTERRUPT_MASK - 1)) == 0) -#define SINGLE_INT_NUM XCHAL_INTLEVEL_NUM(_INTERRUPT_LEVEL) - - -#define INTLEVEL_N_MASK INTERRUPT_MASK // mask of interrupts at this priority -#define INTLEVEL_N_NUM SINGLE_INT_NUM // interrupt number if there is only one -#define INTLEVEL_N_BELOW_MASK XCHAL_INTLEVEL_ANDBELOW_MASK(_INTERRUPT_LEVEL) - -/* Indicates whether there are multiple interrupts at this interrupt - * priority, ie. mapped to this interrupt vector. 
- * If there is only one, its number is INTLEVEL_N_NUM - */ -#define MULTIPLE_INTERRUPTS (!SINGLE_INTERRUPT) - -/* - * High priority interrupt stack frame: - */ -STRUCT_BEGIN -STRUCT_FIELD (long,4,HESF_,SAR) -STRUCT_FIELD (long,4,HESF_,WINDOWSTART) -STRUCT_FIELD (long,4,HESF_,WINDOWBASE) -STRUCT_FIELD (long,4,HESF_,EPC1) -STRUCT_FIELD (long,4,HESF_,EXCCAUSE) -STRUCT_FIELD (long,4,HESF_,EXCVADDR) -STRUCT_FIELD (long,4,HESF_,EXCSAVE1) -STRUCT_FIELD (long,4,HESF_,VPRI) /* (XEA1 only) */ -#if XCHAL_HAVE_MAC16 -STRUCT_FIELD (long,4,HESF_,ACCLO) -STRUCT_FIELD (long,4,HESF_,ACCHI) -/*STRUCT_AFIELD(long,4,HESF_,MR, 4)*/ -#endif -#if XCHAL_HAVE_LOOPS -STRUCT_FIELD (long,4,HESF_,LCOUNT) -STRUCT_FIELD (long,4,HESF_,LBEG) -STRUCT_FIELD (long,4,HESF_,LEND) -#endif -STRUCT_AFIELD(long,4,HESF_,AREG, 64) /* address registers ar0..ar63 */ -#define HESF_AR(n) HESF_AREG+((n)*4) -STRUCT_END(HighPriFrame) -#define HESF_TOTALSIZE HighPriFrameSize+32 /* 32 bytes for interrupted code's save areas under SP */ - - -#if XCHAL_HAVE_XEA1 && HAVE_XSR /* could be made true for T1040 and T1050 */ -# error "high-priority interrupt stack frame needs adjustment if HAVE_XSR is allowed with XEA1" -#endif - - -#define PRI_N_STACK_SIZE 1024 /* default to 1 kB stack for each level-N handling */ - - - // Allocate save area and stack: - // (must use .bss, not .comm, because the subsequent .set does not work otherwise) -#if CONFIG_MULTICORE - .macro generate_stack_for_int core_id - .if GREATERTHAN(CONFIG_CORE_COUNT, \core_id) - .section .bss, "aw" - .align 16 -LABEL(_Pri_,_Stack&core_id): .space PRI_N_STACK_SIZE + HESF_TOTALSIZE - .endif - .endm - - generate_stack_for_int 0 - generate_stack_for_int 1 - generate_stack_for_int 2 - generate_stack_for_int 3 -#else - .section .bss, "aw" - .align 16 -LABEL(_Pri_,_Stack): .space PRI_N_STACK_SIZE + HESF_TOTALSIZE -#endif - -#if HAVE_XSR - .data - .global LABEL(_Pri_,_HandlerAddress) -LABEL(_Pri_,_HandlerAddress): .space 4 -#endif - - - .text - .align 4 - .global LABEL(_Level,FromVector) -LABEL(_Level,FromVector): -#if CONFIG_MULTICORE - xtos_stack_addr_percore_add a2, LABEL(_Pri_,_Stack), PRI_N_STACK_SIZE // get ptr to save area -#else - movi a2, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE -#endif - // interlock - - // Save a few registers so we can do some work: - s32i a0, a2, HESF_AR(0) -#if HAVE_XSR - //movi a0, LABEL(_Level,FromVector) // this dispatcher's address - movi a0, LABEL(_Pri_,_HandlerAddress) // dispatcher address var. - s32i a1, a2, HESF_AR(1) - l32i a0, a0, 0 // get dispatcher address - s32i a3, a2, HESF_AR(3) - xchgsr excsave _INTERRUPT_LEVEL a0 // get saved a2, restore dispatcher address -#else - readsr excsave _INTERRUPT_LEVEL a0 // get saved a2 - s32i a1, a2, HESF_AR(1) - s32i a3, a2, HESF_AR(3) -#endif - s32i a4, a2, HESF_AR(4) - s32i a0, a2, HESF_AR(2) - - // Save/restore all exception state - // (IMPORTANT: this code assumes no general exceptions occur - // during the execution of this dispatcher until this state - // is completely saved and from the point it is restored.) - // - // Exceptions that may normally occur within the C handler - // include window exceptions (affecting EPC1), alloca exceptions - // (affecting EPC1/EXCCAUSE and its handling uses EXCSAVE1), - // and possibly others depending on the particular C handler - // (possibly needing save/restore of EXCVADDR; and EXCVADDR - // is also possibly corrupted by any access thru an auto-refill - // way on a processor with a full MMU). 
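Read as a C struct (an editorial rendering only; the build consumes the assembly offsets emitted by the STRUCT_FIELD/STRUCT_AFIELD macros above), the level-N save area looks roughly like this, with HESF_TOTALSIZE adding a further 32 bytes below SP for the interrupted code's save areas:

    #include <stdint.h>

    /* 4-byte fields in declaration order, mirroring the HESF_* offsets;
     * the XCHAL_* configuration macros come from the Xtensa core headers. */
    struct high_pri_frame {
            uint32_t sar;
            uint32_t windowstart;
            uint32_t windowbase;
            uint32_t epc1;
            uint32_t exccause;
            uint32_t excvaddr;
            uint32_t excsave1;
            uint32_t vpri;             /* XEA1 only */
    #if XCHAL_HAVE_MAC16
            uint32_t acclo;
            uint32_t acchi;
    #endif
    #if XCHAL_HAVE_LOOPS
            uint32_t lcount;
            uint32_t lbeg;
            uint32_t lend;
    #endif
            uint32_t areg[64];         /* entire physical AR file, ar0..ar63 */
    };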
- // - rsr.epc1 a3 - rsr.exccause a4 - s32i a3, a2, HESF_EPC1 - s32i a4, a2, HESF_EXCCAUSE -#if !XCHAL_HAVE_XEA1 - rsr.excvaddr a3 - s32i a3, a2, HESF_EXCVADDR -#endif - rsr.excsave1 a4 - s32i a4, a2, HESF_EXCSAVE1 - -#ifdef __XTENSA_WINDOWED_ABI__ - // Save remainder of entire address register file (!): - movi a0, XCHAL_NUM_AREGS - 8 // how many saved so far -#endif - - s32i a5, a2, HESF_AR(5) - s32i a6, a2, HESF_AR(6) - s32i a7, a2, HESF_AR(7) - -1: s32i a8, a2, HESF_AR(8) - s32i a9, a2, HESF_AR(9) - s32i a10, a2, HESF_AR(10) - s32i a11, a2, HESF_AR(11) - s32i a12, a2, HESF_AR(12) - s32i a13, a2, HESF_AR(13) - s32i a14, a2, HESF_AR(14) - s32i a15, a2, HESF_AR(15) - -#ifdef __XTENSA_WINDOWED_ABI__ - addi a8, a0, -8 - addi a10, a2, 8*4 - rotw 2 - bnez a0, 1b // loop until done - - rotw 2 - // back to original a2 ... - - // Save a few other registers required for C: - rsr.windowstart a3 - rsr.windowbase a4 - s32i a3, a2, HESF_WINDOWSTART - s32i a4, a2, HESF_WINDOWBASE - - // Setup window registers for first caller: - movi a3, 1 - movi a4, 0 - wsr.windowstart a3 - wsr.windowbase a4 - rsync - - // Note: register window has rotated, ie. a0..a15 clobbered. - -#endif /* __XTENSA_WINDOWED_ABI__ */ - -#if CONFIG_MULTICORE - xtos_stack_addr_percore_add a1, LABEL(_Pri_,_Stack), PRI_N_STACK_SIZE // get ptr to save area -#else - movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // get ptr to save area -#endif - movi a0, 0 // mark start of call frames in stack - - // Critical state saved, a bit more to do to allow window exceptions... - - // We now have a C-coherent stack and window state. - // Still have to fix PS while making sure interrupts stay disabled - // at the appropriate level (ie. level 2 and below are disabled in this case). - -#if XCHAL_HAVE_XEA1 -#if CONFIG_MULTICORE - xtos_addr_percore a7, xtos_intstruct // address of interrupt management globals -#else - movi a7, _xtos_intstruct // address of interrupt management globals -#endif - rsilft a3, _INTERRUPT_LEVEL, XTOS_LOCKLEVEL // lockout - movi a4, ~INTLEVEL_N_BELOW_MASK // mask out all interrupts at this level or lower - l32i a3, a7, XTOS_VPRI_ENABLED_OFS // read previous _xtos_vpri_enabled - l32i a5, a7, XTOS_ENABLED_OFS // read _xtos_enabled - s32i a4, a7, XTOS_VPRI_ENABLED_OFS // set new _xtos_vpri_enabled (mask interrupts as if at _INTERRUPT_LEVEL) - s32i a3, a1, HESF_VPRI // save previous vpri - movi a2, PS_WOECALL4_ABI + PS_UM // UM=1, INTLEVEL=0 - and a3, a5, a4 // mask out selected interrupts - wsr.intenable a3 // disable all low-priority interrupts -#else - // Load PS for C code, clear EXCM (NOTE: this step is different for XEA1): - movi a2, PS_WOECALL4_ABI + PS_UM + _INTERRUPT_LEVEL // UM=1, INTLEVEL=N, EXCM=0, RING=0 -#endif - wsr.ps a2 // update PS to enable window exceptions, etc as per above - rsync - - // Okay, window exceptions can now happen (although we have to call - // deep before any will happen because we've reset WINDOWSTART). 
- - // Save other state that might get clobbered by C code: - -////////////////// COMMON DISPATCH CODE BEGIN - - rsr.sar a14 - s32i a14, a1, HESF_SAR -#if XCHAL_HAVE_LOOPS - rsr.lcount a14 - s32i a14, a1, HESF_LCOUNT - rsr.lbeg a14 - s32i a14, a1, HESF_LBEG - rsr.lend a14 - s32i a14, a1, HESF_LEND -#endif -#if XCHAL_HAVE_MAC16 - rsr.acclo a14 - s32i a14, a1, HESF_ACCLO - rsr.acchi a14 - s32i a14, a1, HESF_ACCHI -#endif - -#if MULTIPLE_INTERRUPTS /* > 1 interrupts at this priority */ // _split_ multi_setup -#define TABLE_OFS 0 - - rsr.interrupt a15 // mask of pending interrupts -# if XCHAL_HAVE_XEA1 - l32i a12, a7, XTOS_ENABLED_OFS // mask of enabled interrupts -# else - rsr.intenable a12 // mask of enabled interrupts -# endif - movi a13, INTLEVEL_N_MASK // mask of interrupts at this priority level - and a15, a15, a12 - and a15, a15, a13 // enabled & pending interrupts at this priority - _beqz a15, LABEL(Pri_,_spurious) // handle spurious interrupts (eg. level-trig.) -LABEL(Pri_,_loop): // handle all enabled & pending interrupts - neg a14, a15 - and a14, a14, a15 // single-out least-significant bit set in mask - wsr.intclear a14 // clear if edge-trig. or s/w or wr/err (else no effect) - - // Compute pointer to interrupt table entry, given mask a14 with single bit set: - -# if XCHAL_HAVE_NSA -#if CONFIG_MULTICORE - xtos_addr_percore_sub a12, xtos_interrupt_table, (32-XCHAL_NUM_INTERRUPTS)*8 -#else - movi a12, xtos_interrupt_table - (32-XCHAL_NUM_INTERRUPTS)*8 -#endif - nsau a14, a14 // get index of bit in a14, numbered from msbit - addx8 a12, a14, a12 -# else /* XCHAL_HAVE_NSA */ -#if CONFIG_MULTICORE - xtos_addr_percore a12, xtos_interrupt_table // pointer to interrupt table -#else - movi a12, xtos_interrupt_table // pointer to interrupt table -#endif - bltui a14, 0x10000, 1f // in 16 lsbits? (if so, check them) - addi a12, a12, 16*8 // no, index is at least 16 entries further - // (the above ADDI expands to an ADDI+ADDMI sequence, +128 is outside its range) - extui a14, a14, 16,16 // shift right upper 16 bits -1: bltui a14, 0x100, 1f // in 8 lsbits? (if so, check them) - addi a12, a12, 8*8 // no, index is at least 8 entries further - srli a14, a14, 8 // shift right upper 8 bits -1: bltui a14, 0x10, 1f // in 4 lsbits? (if so, check them) - addi a12, a12, 4*8 // no, index is at least 4 entries further - srli a14, a14, 4 // shift right 4 bits -1: bltui a14, 0x4, 1f // in 2 lsbits? (if so, check them) - addi a12, a12, 2*8 // no, index is at least 2 entries further - srli a14, a14, 2 // shift right 2 bits -1: bltui a14, 0x2, 1f // is it the lsbit? - addi a12, a12, 1*8 // no, index is one entry further -1: // done! 
a12 points to interrupt's table entry -# endif /* XCHAL_HAVE_NSA */ - -#else /* !MULTIPLE_INTERRUPTS */ - -# if XCHAL_HAVE_NSA -# define TABLE_OFS 8 * (XCHAL_NUM_INTERRUPTS - 1 - INTLEVEL_N_NUM) -# else -# define TABLE_OFS 8 * INTLEVEL_N_NUM -# endif - - movi a13, INTLEVEL_N_MASK // (if interrupt is s/w or edge-triggered or write/err only) -#if CONFIG_MULTICORE - xtos_addr_percore a12, xtos_interrupt_table // get pointer to its interrupt table entry -#else - movi a12, xtos_interrupt_table // get pointer to its interrupt table entry -#endif - wsr.intclear a13 // clear the interrupt (if s/w or edge or wr/err only) - -#endif /* ifdef MULTIPLE_INTERRUPTS */ - - l32i a13, a12, TABLE_OFS + 0 // get pointer to handler from table entry -#ifdef __XTENSA_CALL0_ABI__ - l32i a2, a12, TABLE_OFS + 4 // pass single argument to C handler - callx0 a13 // call interrupt's C handler -#else - l32i a6, a12, TABLE_OFS + 4 // pass single argument to C handler - callx4 a13 // call interrupt's C handler -#endif - -#if XCHAL_HAVE_XEA1 -#if CONFIG_MULTICORE - xtos_addr_percore a7, xtos_intstruct // address of interrupt management globals -#else - movi a7, _xtos_intstruct // address of interrupt management globals -#endif -#endif -#if MULTIPLE_INTERRUPTS /* > 1 interrupts at this priority */ - rsr.interrupt a15 // get pending interrupts -# if XCHAL_HAVE_XEA1 - l32i a12, a7, XTOS_ENABLED_OFS // get enabled interrupts -# else - rsr.intenable a12 // get enabled interrupts -# endif - movi a13, INTLEVEL_N_MASK // get mask of interrupts at this priority level - and a15, a15, a12 - and a15, a15, a13 // pending+enabled interrupts at this priority - _bnez a15, LABEL(Pri_,_loop) // if any remain, dispatch one -LABEL(Pri_,_spurious): -#endif /* MULTIPLE_INTERRUPTS */ - - // Restore everything, and return. - -#if XCHAL_HAVE_EXCLUSIVE - // Clear exclusive monitors. - clrex -#endif - - // Three temp registers are required for this code to be optimal (no interlocks) in - // T2xxx microarchitectures with 7-stage pipe; otherwise only two - // registers would be needed. - // -#if XCHAL_HAVE_LOOPS - l32i a13, a1, HESF_LCOUNT - l32i a14, a1, HESF_LBEG - l32i a15, a1, HESF_LEND - wsr.lcount a13 - wsr.lbeg a14 - wsr.lend a15 -#endif - -#if XCHAL_HAVE_MAC16 - l32i a13, a1, HESF_ACCLO - l32i a14, a1, HESF_ACCHI - wsr.acclo a13 - wsr.acchi a14 -#endif - l32i a15, a1, HESF_SAR - wsr.sar a15 - -////////////////// COMMON DISPATCH CODE END - -#if XCHAL_HAVE_XEA1 - // Here, a7 = address of interrupt management globals - l32i a4, a1, HESF_VPRI // restore previous vpri - rsil a3, XTOS_LOCKLEVEL // lockout - l32i a5, a7, XTOS_ENABLED_OFS // read _xtos_enabled - s32i a4, a7, XTOS_VPRI_ENABLED_OFS // set new _xtos_vpri_enabled - movi a2, 0x00020 + _INTERRUPT_LEVEL // WOE=0, UM=1, INTLEVEL=N - and a3, a5, a4 // mask out selected interrupts - wsr.intenable a3 // disable all low-priority interrupts -#else - // Load PS for interrupt exit, set EXCM: - movi a2, 0x00030 + _INTERRUPT_LEVEL // WOE=0, CALLINC=0, UM=1, INTLEVEL=N, EXCM=1, RING=0 -#endif - wsr.ps a2 // update PS to disable window exceptions, etc as per above - rsync - - // NOTE: here for XEA1, restore INTENABLE etc... - -#ifdef __XTENSA_WINDOWED_ABI__ - // Restore window registers: - l32i a2, a1, HESF_WINDOWSTART - l32i a3, a1, HESF_WINDOWBASE - wsr.windowstart a2 - wsr.windowbase a3 - rsync - // Note: register window has rotated, ie. a0..a15 clobbered. 
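In C terms, the dispatch loop above (for the multiple-interrupts-per-level case) amounts to the following sketch; the register accessors are hypothetical stand-ins for the rsr.interrupt / rsr.intenable / wsr.intclear instructions, and table_index() stands in for the NSAU- or shift-based lookup shown above:

    #include <stdint.h>

    struct int_entry {
            void (*handler)(void *arg);   /* offset 0, cf. TABLE_OFS + 0 */
            void *arg;                    /* offset 4, cf. TABLE_OFS + 4 */
    };

    extern struct int_entry xtos_interrupt_table[];

    /* Hypothetical stand-ins for the special-register accesses. */
    extern uint32_t read_interrupt(void);
    extern uint32_t read_intenable(void);
    extern void write_intclear(uint32_t mask);
    extern unsigned int table_index(uint32_t single_bit);

    static void dispatch_level_n(uint32_t level_mask)
    {
            uint32_t pending;

            while ((pending = read_interrupt() & read_intenable() & level_mask) != 0) {
                    uint32_t bit = pending & -pending;  /* least-significant set bit */
                    struct int_entry *e = &xtos_interrupt_table[table_index(bit)];

                    write_intclear(bit);   /* no effect for level-triggered sources */
                    e->handler(e->arg);
            }
    }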
- - // Reload initial stack pointer: -#if CONFIG_MULTICORE - xtos_stack_addr_percore_add a1, LABEL(_Pri_,_Stack), PRI_N_STACK_SIZE // - 16 -#else - movi a1, LABEL(_Pri_,_Stack) + PRI_N_STACK_SIZE // - 16 -#endif - movi a6, XCHAL_NUM_AREGS - 8 // how many saved so far - addi a7, a1, -8*4 - - // Restore entire register file (!): - -1: - addi a14, a6, -8 - addi a15, a7, 8*4 - l32i a4, a15, HESF_AR(4) - l32i a5, a15, HESF_AR(5) - l32i a6, a15, HESF_AR(6) - l32i a7, a15, HESF_AR(7) - l32i a8, a15, HESF_AR(8) - l32i a9, a15, HESF_AR(9) - l32i a10,a15, HESF_AR(10) - l32i a11,a15, HESF_AR(11) - rotw 2 - bnez a6, 1b // loop until done - - l32i a4, a7, HESF_AR(12) - l32i a5, a7, HESF_AR(13) - l32i a6, a7, HESF_AR(14) - l32i a7, a7, HESF_AR(15) - rotw 2 - - // back to original a1 ... - -#else /* Call0 ABI: */ - - l32i a4, a1, HESF_AR(4) // restore general registers - l32i a5, a1, HESF_AR(5) - l32i a6, a1, HESF_AR(6) - l32i a7, a1, HESF_AR(7) - l32i a8, a1, HESF_AR(8) - l32i a9, a1, HESF_AR(9) - l32i a10, a1, HESF_AR(10) - l32i a11, a1, HESF_AR(11) - l32i a12, a1, HESF_AR(12) - l32i a13, a1, HESF_AR(13) - l32i a14, a1, HESF_AR(14) - l32i a15, a1, HESF_AR(15) - -#endif /* __XTENSA_WINDOWED_ABI__ */ - - // Restore exception state: - l32i a2, a1, HESF_EPC1 - l32i a3, a1, HESF_EXCCAUSE - wsr.epc1 a2 - wsr.exccause a3 -#if !XCHAL_HAVE_XEA1 - l32i a2, a1, HESF_EXCVADDR - wsr.excvaddr a2 -#endif - l32i a3, a1, HESF_EXCSAVE1 - wsr.excsave1 a3 - - l32i a0, a1, HESF_AR(0) - l32i a2, a1, HESF_AR(2) - l32i a3, a1, HESF_AR(3) - l32i a1, a1, HESF_AR(1) - rfi _INTERRUPT_LEVEL - - .size LABEL(_Level,FromVector), . - LABEL(_Level,FromVector) - - // This symbol exists solely for the purpose of being able to pull-in this - // dispatcher using _xtos_dispatch_level() routines with the tiny-rt LSP: - .global LABEL(_Level,HandlerLabel) - .set LABEL(_Level,HandlerLabel), 0 - -#endif /* XCHAL_HAVE_INTERRUPTS */ - diff --git a/src/arch/xtensa/xtos/int-highpri-template.S b/src/arch/xtensa/xtos/int-highpri-template.S deleted file mode 100644 index e41cc846c0e6..000000000000 --- a/src/arch/xtensa/xtos/int-highpri-template.S +++ /dev/null @@ -1,160 +0,0 @@ -// High-Priority Interrupt Handler Template -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/int-highpri-template.S#1 $ - -// Copyright (c) 2004-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// -// This file provides skeleton code for writing high-priority interrupt -// handlers in assembler for performance. 
-// -// By default, this file is included by inth-template.S . -// The default Makefile defines _INTERRUPT_LEVEL when assembling -// inth-template.S for each medium and high priority interrupt level. -// -// To use this template file, define a macro called _INTERRUPT_LEVEL -// to be the interrupt priority level of the vector, then include this file. - - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_INTERRUPTS - -#define INTERRUPT_MASK XCHAL_INTLEVEL_MASK(_INTERRUPT_LEVEL) -#define SINGLE_INTERRUPT (INTERRUPT_MASK & (INTERRUPT_MASK - 1) == 0) -#define SINGLE_INT_NUM XCHAL_INTLEVEL_NUM(_INTERRUPT_LEVEL) - - -// NOTE: It is strongly recommended that high-priority -// interrupt handlers be written in assembly. -// -// High-priority interrupt handlers can be written in C, -// but only at the cost of an unreasonable amount of state -// save and restore (including the entire physical address -// register file and others, see int-highpri-dispatcher.S) -// that makes high-priority interrupt dispatching much slower -// than for low and medium priority interrupts. -// (Low and medium priority interrupts are masked by atomic -// register window operations, so they take advantage of a -// coherent window state for fast entry. High priority -// interrupts are not masked by window operations so they -// can interrupt them, leading to a potentially incoherent -// window state at the time of the interrupt. Given that -// high priority handlers must save and restore everything -// they touch, they end up needing to save and restore the -// entire window state [physical address register file etc.] -// and all exception state which they can also interrupt.) -// See also the Microprocessor Programmer's Guide. - -// High-priority interrupts are designed to be very fast and with -// very low latency. -// Typical high-priority interrupt service routines are kept -// relatively small and fast. Either there is little to do, -// or the routine handles only the necessary high priority -// activities related to a device and leaves the rest -// (other more complex and time-consuming activities) -// to be scheduled later, eg. by triggering a level-one -// (low-priority) or medium-priority software interrupt whose -// handler can be written in C for the more extensive processing. - -// NOTE: The following handler is just skeleton example -// code. It is NOT a functional handler. For software, edge- -// triggered and write-error interrupts, it simply does nothing -// and return. For other types (timer and level-triggered), -// this code does not clear the source(s) of interrupt, -// hence if any interrupt at this priority level are both enabled -// and triggered, the processor repeatedly takes the interrupt -// in a loop. This is all okay as a default, because -// XTOS (and other operating systems) clears the INTENABLE -// register at startup, requiring the application to -// enable specific interrupts before they can be taken. -// So as long as you don't enable any interrupt of this -// priority level, this example handler will never execute. - -// Exports -.global LABEL(_Level,FromVector) - - .data - .align 4 -LABEL(int,save): - .space 4 // save area - - .text - .align 4 -LABEL(_Level,FromVector): - // The vectoring code has already saved a2 in EXCSAVEn. - // Save any other registers we'll use: - movi a2, LABEL(int,save) - s32i a1, a2, 0 - // ... add more as needed (increase save area accordingly) ... - - // WRITE YOUR INTERRUPT HANDLING CODE HERE... 
- - // If multiple interrupts are mapped to this priority level, - // you'll probably need to distinguish which interrupt(s) - // occurred by reading the INTERRUPT (INTREAD) and - // INTENABLE registers, and'ing them together, and - // looking at what bits are set in both. - // If any of the interrupts are level-triggered, be ready - // to handle the case where no interrupts are to be handled - // -- this is called a spurious interrupt, and can happen - // when the level-triggered interrupt line goes inactive - // after the interrupt is taken but before the INTERRUPT - // register is read. - - // You'll also normally want to clear the source of - // the interrupt before returning, to avoid getting - // the same interrupt again immediately. For illustration, - // this code clears all software, edge-triggered, and - // write-error interrupts at this priority level (if any). - // NOTE: Timer interrupts must be cleared by writing to - // the corresponding CCOMPAREn register; and level-sensitive - // interrupts can only be cleared externally, usually by - // requesting the associated device to do so (in a - // device-specific manner). - // - movi a1, INTERRUPT_MASK - wsr.intclear a1 - - // Restore registers: - l32i a1, a2, 0 -#if HAVE_XSR - movi a2, LABEL(_Level,FromVector) // restore handler address - xchgsr excsave _INTERRUPT_LEVEL a2 -#else - readsr excsave _INTERRUPT_LEVEL a2 -#endif - // ... add more if more are saved above ... - -#if XCHAL_HAVE_EXCLUSIVE - // If your code used L32EX/S32EX, then clear any active excl monitors. - // Uncomment the line below. - // clrex -#endif - - // Return: - rfi _INTERRUPT_LEVEL - - .size LABEL(_Level,FromVector), . - LABEL(_Level,FromVector) - -#endif /* XCHAL_HAVE_INTERRUPTS */ diff --git a/src/arch/xtensa/xtos/int-initlevel.S b/src/arch/xtensa/xtos/int-initlevel.S deleted file mode 100644 index 1b09f2597439..000000000000 --- a/src/arch/xtensa/xtos/int-initlevel.S +++ /dev/null @@ -1,59 +0,0 @@ -// int-initlevel.S - Routines used to pull-in interrupt dispatch code -// in the tiny-rt LSP. -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/int-initlevel.S#1 $ - -// Copyright (c) 2006-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -// To assemble this template file, define a macro called _INTERRUPT_LEVEL -// to be the interrupt level of the vector. We use the same template for both -// high-level and medium-level interrupts, but not debug level. 
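The LABEL() macro used throughout these templates pastes _INTERRUPT_LEVEL into the symbol names, which is how one template file can be assembled once per level (the build defines _INTERRUPT_LEVEL for each medium and high priority level). A hypothetical reconstruction of the token pasting; the real definition lives in xtos-internal.h and may differ in detail:

    /* Hypothetical sketch of the LABEL() token pasting. */
    #define XTOS_CONCAT_(a, lvl, b)  a ## lvl ## b
    #define XTOS_CONCAT(a, lvl, b)   XTOS_CONCAT_(a, lvl, b)
    #define LABEL(a, b)              XTOS_CONCAT(a, _INTERRUPT_LEVEL, b)

    #define _INTERRUPT_LEVEL 3
    /* LABEL(_Level, FromVector)                -> _Level3FromVector                */
    /* LABEL(_xtos_dispatch_level, _interrupts) -> _xtos_dispatch_level3_interrupts */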
- - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_INTERRUPTS && (_INTERRUPT_LEVEL <= XCHAL_NUM_INTLEVELS) && (_INTERRUPT_LEVEL != XCHAL_DEBUGLEVEL) - - // Nothing to do at runtime. This function only has effect - // at link-time. - // - .text - .global LABEL(_xtos_dispatch_level,_interrupts) - .align 4 -LABEL(_xtos_dispatch_level,_interrupts): - abi_entry - abi_return - .size LABEL(_xtos_dispatch_level,_interrupts), . - LABEL(_xtos_dispatch_level,_interrupts) - - // This reference is what does the work of pulling-in the - // relevant interrupt vector at the specified level. - // This is only needed in the tiny-rt LSP. - // -# if _INTERRUPT_LEVEL == 1 - .global _need_level1int_ -# else - .global LABEL(_Level,Vector) -# endif - -#endif /* XCHAL_HAVE_INTERRUPTS */ diff --git a/src/arch/xtensa/xtos/int-medpri-dispatcher.S b/src/arch/xtensa/xtos/int-medpri-dispatcher.S deleted file mode 100644 index e5ea0e930604..000000000000 --- a/src/arch/xtensa/xtos/int-medpri-dispatcher.S +++ /dev/null @@ -1,401 +0,0 @@ -// Medium-Priority Interrupt Dispatcher Template -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/int-medpri-dispatcher.S#1 $ - -// Copyright (c) 2004-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// -// By default, this file is included by inth-template.S . -// The default Makefile defines _INTERRUPT_LEVEL when assembling -// inth-template.S for each medium and high priority interrupt level. -// -// To use this template file, define a macro called _INTERRUPT_LEVEL -// to be the interrupt priority level of the vector, then include this file. - -#include - -#include -#include "xtos-internal.h" - - -#if XCHAL_HAVE_INTERRUPTS - -#define INTERRUPT_MASK XCHAL_INTLEVEL_MASK(_INTERRUPT_LEVEL) -#define SINGLE_INTERRUPT ((INTERRUPT_MASK & (INTERRUPT_MASK - 1)) == 0) -#define SINGLE_INT_NUM XCHAL_INTLEVEL_NUM(_INTERRUPT_LEVEL) - - -// Strict non-preemptive prioritization - - - .text - .align 4 - .global LABEL(_Level,FromVector) -LABEL(_Level,FromVector): - -/* Allocate an exception stack frame, save a2, a4, and a5, and fix PS as: - * - * if not Call0 ABI - * - enable windowing for 'entry' (ps.woe=1, ps.excm=0) - * - setup ps.callinc to simulate call4 - * endif - * - preserve user mode - * - mask all interrupts at EXCM_LEVEL and lower - * - * Then deallocate the stack, 'rsync' for the write to PS, then use - * 'entry' to re-allocate the stack frame and rotate the register - * window (like a call4, preserving a0..a3). 
*/ - -#if HAVE_XSR - xchgsr excsave _INTERRUPT_LEVEL a2 -#else - readsr excsave _INTERRUPT_LEVEL a2 -#endif - addi a1, a1, -ESF_TOTALSIZE - s32i a2, a1, UEXC_a2 - movi a2, PS_WOECALL4_ABI|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) - s32i a4, a1, UEXC_a4 - s32i a5, a1, UEXC_a5 - wsr.ps a2 - rsync - - /* store ps and pc */ - readsr eps, _INTERRUPT_LEVEL a2 - s32i a2, a1, UEXC_ps - readsr epc, _INTERRUPT_LEVEL a2 - s32i a2, a1, UEXC_pc - - /* store rest of the registers */ - s32i a0, a1, UEXC_a0 - s32i a3, a1, UEXC_a3 - s32i a6, a1, UEXC_a6 - s32i a7, a1, UEXC_a7 - s32i a8, a1, UEXC_a8 - s32i a9, a1, UEXC_a9 - s32i a10, a1, UEXC_a10 - s32i a11, a1, UEXC_a11 - s32i a12, a1, UEXC_a12 - s32i a13, a1, UEXC_a13 - s32i a14, a1, UEXC_a14 - s32i a15, a1, UEXC_a15 - - /* store current sp */ - xtos_addr_percore a2, xtos_saved_sp - s32i a1, a2, 0 - - /* store current task sp if context exists (not first irq) */ - xtos_task_ctx_percore a2 - beqz a2, no_context - s32i a1, a2, TC_stack_pointer - -no_context: -# if XTOS_CNEST - l32i a2, a1, ESF_TOTALSIZE-20 // save nested-C-func call-chain ptr -# endif - addi a1, a1, ESF_TOTALSIZE -# if XTOS_DEBUG_PC - readsr epc _INTERRUPT_LEVEL a4 // [for debug] get return PC - movi a5, 0xC0000000 // [for debug] setup call size... - or a4, a5, a4 // [for debug] set upper two bits of return PC - addx2 a4, a5, a4 // [for debug] clear upper bit -# else - movi a4, 0 /* terminate stack frames, overflow check */ -# endif - _entry a1, ESF_TOTALSIZE - -/* Reset the interrupt level to xtos locklevel (lvl 6 on most systems) */ - - rsil a15, XTOS_LOCKLEVEL - -#if SINGLE_INTERRUPT /* if only one interrupt at this priority level... */ - -/* Preserve the SAR, loop, MAC16 regs and coprocessors. Also, clear the interrupt. */ - -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp0 - xchal_cp0_store a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp1 - xchal_cp1_store a11, a12, a13, a14, a15 -#endif -#endif - rsr.sar a14 - movi a12, INTERRUPT_MASK - s32i a14, a1, UEXC_sar - wsr.intclear a12 // clear if edge-trig or s/w or wr/err (else no effect) - save_loops_mac16 a1, a13, a14 - - /* switch to interrupt stack */ - xtos_int_stack_addr_percore a13, _INTERRUPT_LEVEL, xtos_stack_for_interrupt - s32i a1, a13, 0 - addi a1, a13, SOF_STACK_SIZE - - /* set stack base and size for interrupt context */ - xtos_addr_percore a11, xtos_interrupt_ctx - s32i a13, a11, TC_stack_base - movi a13, SOF_STACK_SIZE - s32i a13, a11, TC_stack_size - - /* save task context */ - xtos_task_ctx_percore a13 - xtos_store_percore a13, a14, xtos_saved_ctx - - /* set interrupt task context */ - xtos_task_ctx_store_percore a11, a14 - -/* Load the handler from the table, initialize two args (interrupt - * number and exception stack frame), then call the interrupt handler. - * Note: The callx12 preserves the original user task's a4..a15.*/ - - xtos_on_wakeup - -#if CONFIG_MULTICORE - xtos_addr_percore_add a12, xtos_interrupt_table, MAPINT(SINGLE_INT_NUM)*XIE_SIZE -#else - movi a12, xtos_interrupt_table + (MAPINT(SINGLE_INT_NUM) * XIE_SIZE) -#endif - l32i a13, a12, XIE_HANDLER - l32i a14, a12, XIE_ARG - mov a15, a1 - callx12 a13 - -#else /* > 1 interrupts at this priority level */ - -/* Get bit list of pending interrupts at the current interrupt priority level. - * If bit list is empty, interrupt is spurious (can happen if a - * genuine interrupt brings control this direction, but the interrupt - * goes away before we read the INTERRUPT register). 
Also save off - * sar, loops, mac16 registers and coprocessors. */ - -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp0 - xchal_cp0_store a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp1 - xchal_cp1_store a11, a12, a13, a14, a15 -#endif -#endif - rsr.interrupt a15 - rsr.intenable a12 - movi a13, INTERRUPT_MASK - and a15, a15, a12 - and a15, a15, a13 - rsr.sar a14 - s32i a14, a1, UEXC_sar - save_loops_mac16 a1, a13, a14 - - /* switch to interrupt stack */ - xtos_int_stack_addr_percore a13, _INTERRUPT_LEVEL, xtos_stack_for_interrupt - s32i a1, a13, 0 - addi a1, a13, SOF_STACK_SIZE - - _beqz a15, LABEL(spurious,int) - - /* set stack base and size for interrupt context */ - xtos_addr_percore a11, xtos_interrupt_ctx - s32i a13, a11, TC_stack_base - movi a13, SOF_STACK_SIZE - s32i a13, a11, TC_stack_size - - /* save task context */ - xtos_task_ctx_percore a13 - xtos_store_percore a13, a14, xtos_saved_ctx - - /* set interrupt task context */ - xtos_task_ctx_store_percore a11, a14 - - xtos_on_wakeup - -/* Loop to handle all pending interrupts. */ - -LABEL(.L1,_loop0): - neg a12, a15 - and a12, a12, a15 - wsr.intclear a12 // clear if edge-trig or s/w or wr/err (else no effect) -#if CONFIG_MULTICORE - xtos_addr_percore a13, xtos_interrupt_table -#else - movi a13, xtos_interrupt_table -#endif - find_ms_setbit a15, a12, a14, 0 - mapint a15 - addx8 a12, a15, a13 - l32i a13, a12, XIE_HANDLER - l32i a14, a12, XIE_ARG - mov a15, a1 - callx12 a13 - - rsr.interrupt a15 - rsr.intenable a12 - movi a13, INTERRUPT_MASK - and a15, a15, a12 - and a15, a15, a13 - _bnez a15, LABEL(.L1,_loop0) - -#endif /* SINGLE_INTERRUPT */ - -/* Restore everything, and return. */ - - /* restore task context if needed */ - xtos_task_ctx_percore a11 - xtos_addr_percore a12, xtos_interrupt_ctx - bne a11, a12, restore_cp - xtos_addr_percore a12, xtos_saved_ctx - xtos_task_ctx_store_percore a12, a11 - -restore_cp: -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - xtos_task_ctx_percore a11 - beqz a11, no_context_2 - l32i a11, a11, TC_stack_pointer - addi a11, a11, UEXC_cp0 - xchal_cp0_load a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - xtos_task_ctx_percore a11 - beqz a11, no_context_2 - l32i a11, a11, TC_stack_pointer - addi a11, a11, UEXC_cp1 - xchal_cp1_load a11, a12, a13, a14, a15 -#endif -#endif - -no_context_2: - restore_loops_mac16 a1, a13, a14, a15 - l32i a14, a1, UEXC_sar -LABEL(spurious,int): - -#if XCHAL_HAVE_EXCLUSIVE - // Clear exclusive monitors. - clrex -#endif - - movi a0, LABEL(return,from_exc) - movi a13, 0xC0000000 - wsr.sar a14 - or a0, a0, a13 - addx2 a0, a13, a0 -# if _INTERRUPT_LEVEL < XCHAL_EXCM_LEVEL -/* Raise the interrupt mask before - * returning to avoid a race condition where we deallocate the - * exception stack frame but still have more register values to - * restore from it. 
*/ - rsil a14, XCHAL_EXCM_LEVEL -# endif - retw -LABEL(return,from_exc): - /* a5 contains interrupt stack pointer */ - addi a5, a5, -SOF_STACK_SIZE - l32i a5, a5, 0 - -# if XTOS_CNEST - s32i a2, a5, ESF_TOTALSIZE-20 // restore nested-C-func call-chain ptr -# endif - - /* store sp after returning from handler */ - s32i a1, a5, UEXC_a1 - -restore: - /* load registers for window spill */ - l32i a4, a5, UEXC_a4 - l32i a6, a5, UEXC_a6 - l32i a7, a5, UEXC_a7 - l32i a8, a5, UEXC_a8 - l32i a9, a5, UEXC_a9 - l32i a10, a5, UEXC_a10 - l32i a11, a5, UEXC_a11 - l32i a12, a5, UEXC_a12 - l32i a13, a5, UEXC_a13 - l32i a14, a5, UEXC_a14 - - /* check if switch is needed */ - xtos_addr_percore a2, xtos_saved_sp - xtos_task_ctx_percore a1 - beqz a1, noSwitch - l32i a1, a1, TC_stack_pointer - l32i a0, a2, 0 - beq a0, a1, noSwitch - -doSwitch: - /* store new task sp */ - s32i a1, a2, 0 - - /* restore sp of task being preempted */ - l32i a1, a5, UEXC_a1 - - /* spill register windows to the stack */ - rsr.ps a2 - movi a3, PS_WOE_MASK - xor a2, a2, a3 - wsr.ps a2 - - call0 xthal_window_spill_nw - - /* restore previous ps */ - rsr.ps a2 - movi a3, PS_WOE_MASK - or a2, a2, a3 - wsr.ps a2 - - /* change stack */ - xtos_addr_percore a5, xtos_saved_sp - l32i a5, a5, 0 - j restore - -noSwitch: - /* restore ps and pc */ - l32i a0, a5, UEXC_ps - writesr eps _INTERRUPT_LEVEL a0 - rsync - l32i a0, a5, UEXC_pc - writesr epc _INTERRUPT_LEVEL a0 - - /* restore sar, loops and mac16 registers */ - l32i a0, a5, UEXC_sar - wsr.sar a0 - restore_loops_mac16 a5, a0, a1, a2 - - /* restore rest of the registers */ - l32i a0, a5, UEXC_a0 - l32i a1, a5, UEXC_a1 - l32i a2, a5, UEXC_a2 - l32i a3, a5, UEXC_a3 - l32i a15, a5, UEXC_a15 - l32i a5, a5, UEXC_a5 - rfi _INTERRUPT_LEVEL - - .size LABEL(_Level,FromVector), . - LABEL(_Level,FromVector) - - // This symbol exists solely for the purpose of being able to pull-in this - // dispatcher using _xtos_dispatch_level() routines with the tiny-rt LSP: - .global LABEL(_Level,HandlerLabel) - .set LABEL(_Level,HandlerLabel), 0 - -#endif /* XCHAL_HAVE_INTERRUPT */ diff --git a/src/arch/xtensa/xtos/int-sethandler.c b/src/arch/xtensa/xtos/int-sethandler.c deleted file mode 100644 index 22737981beea..000000000000 --- a/src/arch/xtensa/xtos/int-sethandler.c +++ /dev/null @@ -1,88 +0,0 @@ - -/* int-sethandler.c - register an interrupt handler in XTOS */ - -/* - * Copyright (c) 1999-2017 Cadence Design Systems, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include "xtos-internal.h" -#include "xtos-structs.h" -#include - - -#if XCHAL_HAVE_INTERRUPTS -#if CONFIG_MULTICORE -extern struct xtos_core_data *core_data_ptr[CONFIG_CORE_COUNT]; -#else -/* - * Table of interrupt handlers. - * NOTE: if the NSA/NSAU instructions are configured, then to save - * a few cycles in the interrupt dispatcher code, the - * xtos_interrupt_table[] array is filled in reverse. - * IMPORTANT: Use the MAPINT() macro defined in xtos-internal.h to index entries in this array. - */ -extern XtosIntHandlerEntry xtos_interrupt_table[XCHAL_NUM_INTERRUPTS]; -#endif -#endif - -_xtos_handler _xtos_set_interrupt_handler_arg( int n, _xtos_handler f, void *arg ) -{ -#if XCHAL_HAVE_INTERRUPTS - XtosIntHandlerEntry *entry; - _xtos_handler old; - _xtos_handler ret; - - if( (n < 0) || (n >= XCHAL_NUM_INTERRUPTS) ) { - ret = 0; /* invalid interrupt number */ - } - else if( (int) Xthal_intlevel[n] > XTOS_LOCKLEVEL ) { - ret = 0; /* priority level too high to safely handle in C */ - } - else { -#if CONFIG_MULTICORE - entry = &(core_data_ptr[cpu_get_id()]->xtos_int_data.xtos_interrupt_table.array[MAPINT(n)]); -#else - entry = xtos_interrupt_table + MAPINT(n); -#endif - old = entry->handler; - if (f) { - entry->handler = f; - entry->u.varg = arg; - } else { - entry->handler = &xtos_unhandled_interrupt; - entry->u.narg = n; - } - ret = (old == &xtos_unhandled_interrupt) ? 0 : old; - } - - return ret; -#else - return 0; -#endif -} - - -_xtos_handler _xtos_set_interrupt_handler( int n, _xtos_handler f ) -{ - return _xtos_set_interrupt_handler_arg( n, f, (void *) n ); -} - diff --git a/src/arch/xtensa/xtos/int-vector.S b/src/arch/xtensa/xtos/int-vector.S deleted file mode 100644 index f23ad50844b7..000000000000 --- a/src/arch/xtensa/xtos/int-vector.S +++ /dev/null @@ -1,70 +0,0 @@ -// int-vector.S - Interrupt Vector Template (for levels > 1) -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/int-vector.S#1 $ - -// Copyright (c) 2003-2017 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -// To use this template file, define a macro called _INTERRUPT_LEVEL -// to be the interrupt level of the vector, include "xtos-internal.h", -// then include this file. We use the same template for both high-level -// and medium-level interrupts, but not debug level. 
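The registration API from the deleted int-sethandler.c above is ordinarily called from C driver code. A rough usage sketch (not code from this tree), with local declarations standing in for the real XTOS runtime header and a hypothetical interrupt number:

    /* Local declarations standing in for the XTOS runtime header. */
    typedef void (*_xtos_handler)(void *arg);
    extern _xtos_handler _xtos_set_interrupt_handler_arg(int n, _xtos_handler f,
                                                         void *arg);

    #define MY_IRQ 7                  /* hypothetical interrupt number */

    static unsigned int irq_count;

    static void my_irq_handler(void *arg)
    {
            unsigned int *counter = arg;

            (*counter)++;             /* acknowledge/handle the device here */
    }

    static void install_irq_handler(void)
    {
            /* Returns the previous handler, or 0 if none was registered, the
             * number is out of range, or the interrupt's priority is above
             * XTOS_LOCKLEVEL (not dispatchable from C) -- see the C source
             * above. The interrupt must still be enabled separately, since
             * XTOS clears INTENABLE at startup.
             */
            _xtos_handler prev =
                    _xtos_set_interrupt_handler_arg(MY_IRQ, my_irq_handler,
                                                    &irq_count);
            (void)prev;
    }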
- - -#define _ASMLANGUAGE -#include "xtos-internal.h" -#include -#include - - -#if XCHAL_HAVE_INTERRUPTS && (_INTERRUPT_LEVEL <= XCHAL_NUM_INTLEVELS) && (_INTERRUPT_LEVEL != XCHAL_DEBUGLEVEL) - - .begin literal_prefix LABEL(.Level,InterruptVector) - .section LABEL(.Level,InterruptVector.text), "ax" - .align 4 - .global LABEL(_Level,Vector) -LABEL(_Level,Vector): - // Medium and high priority interrupt vector: - -# if HAVE_XSR - // With XSR, we can use this vector which has the advantage of being ROMable - // without requiring the handler to also be in ROM; however, it requires - // initializing the EXCSAVEn register (see reset-vector.S) and a slightly - // different save/restore sequence in the handler: - hw_erratum_487_fix - xchgsr excsave _INTERRUPT_LEVEL a2 - jx a2 - - // Pull-in the real handler by reference, to ensure the reset vector gets it: - .global LABEL(_Level,FromVector) - -# else - writesr excsave _INTERRUPT_LEVEL a2 - movi a2, LABEL(_Level,FromVector) - jx a2 -# endif - - .size LABEL(_Level,Vector), . - LABEL(_Level,Vector) - .text - .end literal_prefix - -#endif /* interrupt at that level */ - diff --git a/src/arch/xtensa/xtos/interrupt-pri.h b/src/arch/xtensa/xtos/interrupt-pri.h deleted file mode 100644 index ae22cd2b5761..000000000000 --- a/src/arch/xtensa/xtos/interrupt-pri.h +++ /dev/null @@ -1,177 +0,0 @@ -/* interrupt-pri.h - Definitions and macros related to interrupt prioritization */ -/* - * Copyright (c) 2002-2004, 2006 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#if !defined(_ASMLANGUAGE) && !defined(__ASSEMBLER__) -# error "The interrupt-pri.h header file is meant for inclusion by assembly source code only." -#endif - -#include -#include "xtos-internal.h" - -/* - * The following macros are used by int-lowpri-dispatcher.S to - * implement prioritized interrupt dispatching and fairness. - * The prioritization scheme is set by XTOS parameters in xtos-params.h . - */ - - -#if XCHAL_HAVE_INTERRUPTS - - // msindex_int - // - // Return in register \aindex the index of the first (most significant) bit set - // in register \amask. - // Register \amask is clobbered (modified) by this macro. - // - // Note: this code is similar to the find_ms_setbit macro in . 
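
The msindex_int macro documented just above (and defined next in the diff) returns the index of the most-significant set bit of a pending-interrupt mask: a single nsau instruction when NSA is configured, otherwise a branching binary search. A plain-C rendering of that fallback, for reference only; the assembly that follows is the authoritative version, and it compiles out the steps that a small XCHAL_NUM_INTERRUPTS makes unnecessary.

    /* C sketch of the non-NSA path of msindex_int: binary search for the
     * index of the most-significant set bit.  Illustration only.
     */
    static unsigned int ms_set_bit_index(unsigned int mask)  /* mask must be non-zero */
    {
        unsigned int index = 0;

        if (mask >= 0x10000) { index += 16; mask >>= 16; }   /* one of the upper 16 bits? */
        if (mask >= 0x100)   { index += 8;  mask >>= 8;  }   /* upper 8 of what is left?  */
        if (mask >= 0x10)    { index += 4;  mask >>= 4;  }
        if (mask >= 0x4)     { index += 2;  mask >>= 2;  }
        if (mask >= 0x2)     { index += 1;               }

        return index;  /* 0 = least-significant bit ... 31 = most-significant bit */
    }

With NSA/NSAU configured the hardware instead yields a count of leading zeros, i.e. the reversed index, which is why the interrupt handler tables elsewhere in this patch are filled in reverse and indexed through MAPINT().
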
- // - .macro msindex_int aindex, amask -# if XCHAL_HAVE_NSA - nsau \aindex, \amask // \aindex = interrupt index, from 0 to 31, from left to right - //movi \amask, 31 - //sub \aindex, \amask, \aindex -# else - movi \aindex, 0 // start with result of 0 (point to lsbit of 32) -# if XCHAL_NUM_INTERRUPTS > 16 - bltui \amask, 0x10000, 2f // is it one of the 16 lsbits? (if so, check lower 16 bits) - addi \aindex, \aindex, 16 // no, increment result to upper 16 bits (of 32) - extui \amask, \amask, 16, 16 // check upper half (shift right 16 bits) -2: -# endif -# if XCHAL_NUM_INTERRUPTS > 8 - bltui \amask, 0x100, 2f // is it one of the 8 lsbits? (if so, check lower 8 bits) - addi \aindex, \aindex, 8 // no, increment result to upper 8 bits (of 16) - srli \amask, \amask, 8 // shift right to check upper 8 bits -2: -# endif -# if XCHAL_NUM_INTERRUPTS > 4 - bltui \amask, 0x10, 2f // is it one of the 4 lsbits? (if so, check lower 4 bits) - addi \aindex, \aindex, 4 // no, increment result to upper 4 bits (of 8) - srli \amask, \amask, 4 // shift right 4 bits to check upper half -2: -# endif - bltui \amask, 0x4, 2f // is it one of the 2 lsbits? (if so, check lower 2 bits) - addi \aindex, \aindex, 2 // no, increment result to upper 2 bits (of 4) - srli \amask, \amask, 2 // shift right 2 bits to check upper half -2: - bltui \amask, 0x2, 2f // is it the lsbit? - addi \aindex, \aindex, 1 // no, increment result to upper bit (of 2) -2: // done! -# endif /*!NSA*/ - // HERE: \aindex = index of interrupt to handle - // \amask is available - .endm - - - // msindex_int_nc - // - // Same as msindex_int, but does not clobber \amask. - // Uses extra register \atmp (a temporary register) if needed. - // - .macro msindex_int_nc aindex, amask, atmp -# if XCHAL_HAVE_NSA - msindex_int \aindex, \amask // does not clobber \amask in this case -# else - mov \atmp, \amask - msindex_int \aindex, \atmp -# endif - .endm - - - // indexmask_int - // - // Compute index of highest priority interrupt in given mask, - // and trim mask to single bit corresponding to that interrupt. - // This is used for interrupt dispatching. - // - // Entry: - // \index = (undefined) - // \mask = non-zero mask of interrupt bits to consider handling - // \intptr = &_xtos_intstruct if INTENABLE virtualized, else undefined - // \tmp = (undefined) - // Exit: - // \index = index of interrupt (reversed if NSA present) - // \mask = single bit corresponding to index - // \intptr = (preserved) - // \tmp = (clobbered) - // - .macro indexmask_int index, mask, intptr, tmp -# if XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_LO - - msindex_int \index, \mask // \index = index of msbit set in \mask (\tmp is tmp, \mask clobbered) - // \index now contains the index of the highest priority pending+enabled interrupt. -# if XCHAL_HAVE_NSA - movi \mask, 0x80000000 - ssr \index - srl \mask, \mask // \mask = single bit set corresponding to interrupt to be processed... -# else - movi \mask, 1 - ssl \index - sll \mask, \mask // \mask = single bit set corresponding to interrupt to be processed... -# endif - -# elif XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_HI - - neg \index, \mask // find lsbit in \mask ... - and \mask, \index, \mask // ... - msindex_int_nc \index, \mask, \tmp // \index = index of msbit set in \mask (\tmp is tmp, \mask not clobbered) - -# else -# error Unsupported priority ordering. -# endif /*SUBPRI_ORDER*/ - .endm - - - // index_int - // - // Compute index of highest priority interrupt in given mask. - // This is used for fairness computations. 
- // - // Entry: - // \index = (undefined) - // \mask = non-zero mask of interrupt bits to consider handling - // \intptr = &_xtos_intptr - // \tmp = (undefined) - // Exit: - // \index = index of interrupt (reversed if NSA present) - // \mask = (preserved) - // \intptr = (preserved) - // \tmp = (clobbered) - // - .macro index_int index, mask, intptr, tmp -# if XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_LO - msindex_int_nc \index, \mask, \tmp // \index = index of msbit set in \mask (\mask not clobbered) -# elif XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_HI - neg \tmp, \mask // find lsbit in \mask ... - and \tmp, \tmp, \mask // ... - msindex_int \index, \tmp // \index = index of msbit set in \tmp (\tmp is clobbered) -# else -# error oops -# endif - .endm // index_int - - -#endif /* XCHAL_HAVE_INTERRUPTS */ - - diff --git a/src/arch/xtensa/xtos/interrupt-table.S b/src/arch/xtensa/xtos/interrupt-table.S deleted file mode 100644 index 795a0a53cb04..000000000000 --- a/src/arch/xtensa/xtos/interrupt-table.S +++ /dev/null @@ -1,129 +0,0 @@ -// interrupt-table.S - Interrupt handler table and default handler - -// Copyright (c) 2004-2017 Cadence Design Systems, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include "xtos-internal.h" - -#if !CONFIG_MULTICORE -#if XCHAL_HAVE_INTERRUPTS - - .data - - .global _xtos_intstruct - .align 8 -_xtos_intstruct: -# if XTOS_VIRTUAL_INTENABLE - .global _xtos_enabled - .type _xtos_enabled,@object - .size _xtos_enabled,4 - .global _xtos_vpri_enabled - .type _xtos_vpri_enabled,@object - .size _xtos_vpri_enabled,4 -_xtos_enabled: .word 0 -_xtos_vpri_enabled: .word 0xFFFFFFFF -# endif -# if XTOS_VIRTUAL_INTERRUPT -# error Virtualized INTERRUPT register not yet supported. - .global _xtos_pending - .type _xtos_pending,@object - .size _xtos_pending,4 -_xtos_pending: .word 0 -# endif - - /* - * Table of C-level interrupt handlers (and args, etc) for each interrupt. - * NOTE: if the NSA/NSAU instructions are configured, then to save a few - * cycles in the interrupt dispatcher code, this table is filled in reverse. - * C code uses the MAPINT() macro defined in xtos-internal.h to index entries. - * NOTE: Under some conditions (turned off by default in xtos-params.h), - * this table gets properly initialized by the _xtos_init() function in - * init.c . NOTA: A future enhancement may be to always configure - * and build this table at build-time rather than ever doing it at run-time. 
- */ -#define i .Li /* workaround a silly GDB testsuite regression */ - .data - .global xtos_interrupt_table - .align 8 -xtos_interrupt_table: - .set i, XCHAL_HAVE_NSA*(XCHAL_NUM_INTERRUPTS-1) - .rept XCHAL_NUM_INTERRUPTS - .word xtos_unhandled_interrupt - .word i // parameter: interrupt number - .set i, i+1-(XCHAL_HAVE_NSA*2) - .endr - -# if XIE_EXTEND - /* MUST *IMMEDIATELY* follow xtos_interrupt_table: */ - .global xtos_interrupt_mask_table -xtos_interrupt_mask_table: - .set i, XCHAL_HAVE_NSA*(XCHAL_NUM_INTERRUPTS-1) - .rept XCHAL_NUM_INTERRUPTS - /* Default to all low-priority (level-one) interrupts at their own virtual priority: */ -# if XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_HI - .word ((1< XCHAL_DEBUGLEVEL - rsil a3, XCHAL_DEBUGLEVEL-1 // ensure break takes effect -# endif - break 1, 15 // unhandled (unregistered) interrupt $a2 -# else -1: j 1b // unhandled interrupt - loop forever -# endif - abi_return - - .size xtos_unhandled_interrupt, . - xtos_unhandled_interrupt - -#endif /* XCHAL_HAVE_INTERRUPTS */ - diff --git a/src/arch/xtensa/xtos/ints-off.S b/src/arch/xtensa/xtos/ints-off.S deleted file mode 100644 index 66b851f58593..000000000000 --- a/src/arch/xtensa/xtos/ints-off.S +++ /dev/null @@ -1,78 +0,0 @@ -// ints-off.S - Interrupt related assembler code - _xtos_ints_off - -// Copyright (c) 2004-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include "xtos-internal.h" - - -/*************************************************************************** - * _xtos_ints_on() and _xtos_ints_off() are used - * to enable and disable interrupts from C code; - * they can be called from the application or from a C interrupt handler. - */ - -// u32 _xtos_ints_off( u32 mask ); [T1050.0 docs this as returning old INTENABLE value] -// Disables a set of interrupts. See _xtos_ints_on(). -// -// MUST NOT be called when PS.INTLEVEL > XTOS_LOCKLEVEL -// (otherwise PS.INTLEVEL gets lowered; and operation may be inconsistent -// if this is called in the handler of an interrupt of level > LOCKLEVEL). 
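
The block comment above describes _xtos_ints_off(). With XTOS_VIRTUAL_INTENABLE the routine never writes INTENABLE from the caller's mask directly; it keeps two software masks (_xtos_enabled and _xtos_vpri_enabled, the words defined in interrupt-table.S above) and writes their AND to the register. A rough C model of the sequence the assembly below performs, sketched under the assumption that wsr_intenable() stands in for the wsr.intenable instruction and that the xtos lock shown in the assembly is held around the whole sequence.

    /* Illustrative C model of _xtos_ints_off() with INTENABLE virtualization.
     * The authoritative version is the assembly below, which runs with the
     * xtos lock held (PS.INTLEVEL raised to XTOS_LOCKLEVEL).
     */
    static unsigned int xtos_enabled_model;                   /* models _xtos_enabled      */
    static unsigned int xtos_vpri_enabled_model = ~0u;        /* models _xtos_vpri_enabled */

    static void wsr_intenable(unsigned int v) { (void)v; }    /* stand-in for wsr.intenable */

    static unsigned int model_ints_off(unsigned int mask)
    {
        unsigned int old = xtos_enabled_model;

        xtos_enabled_model = old & ~mask;                     /* xtos_enabled &= ~mask */
        /* Hardware INTENABLE is always the AND of the two software masks. */
        wsr_intenable(xtos_enabled_model & xtos_vpri_enabled_model);

        return old;                                           /* previous (virtual) INTENABLE */
    }
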
-// - .text - .align 4 - .global _xtos_ints_off - .type _xtos_ints_off,@function -_xtos_ints_off: - abi_entry -#if XCHAL_HAVE_INTERRUPTS && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) -# if XTOS_VIRTUAL_INTENABLE -#if CONFIG_MULTICORE - xtos_addr_percore a4, xtos_intstruct -#else - movi a4, _xtos_intstruct -#endif - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - l32i a3, a4, XTOS_ENABLED_OFS // a3 = xtos_enabled - l32i a6, a4, XTOS_VPRI_ENABLED_OFS // a6 = xtos_vpri_enabled - or a5, a3, a2 // a5 = xtos_enabled | mask - xor a5, a5, a2 // a5 = xtos_enabled & ~mask - s32i a5, a4, XTOS_ENABLED_OFS // xtos_enabled &= ~mask - and a5, a5, a6 // a5 = xtos_enabled & xtos_vpri_enabled -# else - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - rsr.intenable a3 - //interlock - or a5, a3, a2 // a5 = INTENABLE | mask - xor a5, a5, a2 // a5 = INTENABLE & ~mask -# endif - wsr.intenable a5 - xtos_unlock a7 - mov a2, a3 // return previous (virtual or real) INTENABLE value -#else /*XCHAL_HAVE_INTERRUPTS*/ - movi a2, 0 // this config does not have interrupts, so return 0 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - - .size _xtos_ints_off, . - _xtos_ints_off - diff --git a/src/arch/xtensa/xtos/ints-on.S b/src/arch/xtensa/xtos/ints-on.S deleted file mode 100644 index 0e6acc8deb64..000000000000 --- a/src/arch/xtensa/xtos/ints-on.S +++ /dev/null @@ -1,79 +0,0 @@ -// ints-on.S - Interrupt related assembler code - _xtos_ints_on - -// Copyright (c) 2004-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include "xtos-internal.h" - - -/*************************************************************************** - * _xtos_ints_on() and _xtos_ints_off() are used - * to enable and disable interrupts from C code; - * they can be called from the application or from a C interrupt handler. - */ - - -// u32 _xtos_ints_on( u32 mask ); -// Enables a set of interrupts. -// With INTENABLE virtualizing, does not simply set INTENABLE directly, but rather -// computes it as a function of the current virtual priority. -// -// MUST NOT be called when PS.INTLEVEL > XTOS_LOCKLEVEL -// (otherwise PS.INTLEVEL gets lowered; and operation may be inconsistent -// if this is called in the handler of an interrupt of level > LOCKLEVEL). 
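
Because _xtos_ints_off() returns the previous (virtual or real) INTENABLE image, the usual pattern is to save that value and hand the relevant bits back to _xtos_ints_on(), so an interrupt that was already masked on entry stays masked. A short usage sketch; MY_INT is a made-up interrupt number, and both calls remain subject to the PS.INTLEVEL <= XTOS_LOCKLEVEL restriction spelled out in the comments above.

    /* Usage sketch only: MY_INT is hypothetical, and the prototypes are
     * repeated here just to keep the example self-contained.
     */
    extern unsigned int _xtos_ints_off(unsigned int mask);
    extern unsigned int _xtos_ints_on(unsigned int mask);

    #define MY_INT 7

    void update_shared_state(void)
    {
        /* Mask just this interrupt around the critical section. */
        unsigned int prev = _xtos_ints_off(1u << MY_INT);

        /* ... modify data shared with the interrupt handler ... */

        /* Re-enable the bit only if it was enabled on entry. */
        _xtos_ints_on(prev & (1u << MY_INT));
    }
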
-// - .text - .align 4 - .global _xtos_ints_on - .type _xtos_ints_on,@function -_xtos_ints_on: - abi_entry -#if XCHAL_HAVE_INTERRUPTS && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) -# if XTOS_VIRTUAL_INTENABLE -#if CONFIG_MULTICORE - xtos_addr_percore a4, xtos_intstruct -#else - movi a4, _xtos_intstruct -#endif - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - l32i a3, a4, XTOS_ENABLED_OFS // a3 = xtos_enabled - l32i a6, a4, XTOS_VPRI_ENABLED_OFS // a6 = xtos_vpri_enabled - or a5, a3, a2 // xtos_enabled | mask - s32i a5, a4, XTOS_ENABLED_OFS // xtos_enabled |= mask - and a5, a5, a6 // a5 = xtos_enabled & xtos_vpri_enabled -# else - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - rsr.intenable a3 - //interlock - or a5, a3, a2 // INTENABLE | mask -# endif - wsr.intenable a5 - xtos_unlock a7 - mov a2, a3 // return previous (virtual or real) INTENABLE value -#else /*XCHAL_HAVE_INTERRUPTS*/ - movi a2, 0 // this config does not have interrupts, so return 0 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - - .size _xtos_ints_on, . - _xtos_ints_on - diff --git a/src/arch/xtensa/xtos/kernel-vector.S b/src/arch/xtensa/xtos/kernel-vector.S deleted file mode 100644 index 3f86b0ea4857..000000000000 --- a/src/arch/xtensa/xtos/kernel-vector.S +++ /dev/null @@ -1,73 +0,0 @@ -// kernel-vector.S - Kernel Vector for General Exceptions -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/kernel-vector.S#1 $ - -// Copyright (c) 1999-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -/* - * General exceptions in kernel vector mode (PS.UM==0) go to this kernel - * vector. This kernel vector does very little. - * Under normal operation of the single-threaded runtime ("XTOS"), kernel - * vectored general exceptions do not occur, so nothing needs to be done. - * However when debugging, such as when writing exception and - * interrupt handlers, kernel vectored exceptions may occur. - * They are usually the sign of a bug, so here we take a breakpoint - * (if debug option enabled) or take drastic action (infinite loop) - * otherwise. - * - * XTOS does not allow exceptions in interrupt or exception handlers. - * If it did, a more elaborate kernel vector handler would be needed. - * See the Xtensa Microprocessor Programmer's Guide for an - * example of how to implement such a kernel vector handler. 
- */ - -#include -#include -#ifdef SIMULATOR -#include -#endif - -#if XCHAL_HAVE_EXCEPTIONS && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) - - .begin literal_prefix .KernelExceptionVector - .section .KernelExceptionVector.text, "ax" - - .align 4 - .global _KernelExceptionVector -_KernelExceptionVector: - -# if XCHAL_HAVE_DEBUG -1: break 1,0 // unexpected kernel exception -# elif defined(SIMULATOR) - wsr a2, EXCSAVE1 // save a2 where simulator expects it - movi a2, SYS_unhandled_kernel_exc -1: simcall // let simulator/debugger report unhandled exception -# else -1: -# endif - j 1b // infinite loop - unexpected kernel exception - - .size _KernelExceptionVector, . - _KernelExceptionVector - .text - .end literal_prefix - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/memctl_default.S b/src/arch/xtensa/xtos/memctl_default.S deleted file mode 100644 index 164f2c2d2f58..000000000000 --- a/src/arch/xtensa/xtos/memctl_default.S +++ /dev/null @@ -1,42 +0,0 @@ -// memctl_default.S - Default startup value for MEMCTL register. - -// Copyright (c) 1998-2015 Cadence Design Systems, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include - - -// This file just contains this one symbol, used by the reset code. -// It is here rather than in reset-vector.S because we want the symbol -// to be external, so resolution is delayed until link time. -// -// To define your own value to override this default, redefine the -// symbol __memctl_default to the desired value, e.g. - -// -// xt-xcc test.c -g -o test -Wl,--defsym=__memctl_default=0x08080808 -// - - .global __memctl_default - .weak __memctl_default - .equ __memctl_default, XCHAL_CACHE_MEMCTL_DEFAULT - diff --git a/src/arch/xtensa/xtos/memep-enable.S b/src/arch/xtensa/xtos/memep-enable.S deleted file mode 100644 index c3724204675c..000000000000 --- a/src/arch/xtensa/xtos/memep-enable.S +++ /dev/null @@ -1,63 +0,0 @@ -// memep-enable.S -- Turn on local memory ECC/parity checking -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/memep-enable.S#1 $ - -// Copyright (c) 2006-2010 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - - /* - * void _xtos_memep_enable(int flags); - * - * Turn on local memory ECC/parity checking, for both - * data access and instruction fetch. - * - * For now, flags are ignored. Caller is expected to pass zero. - * - * _xtos_memep_initrams() must have already been called, if necessary, - * to ensure all ECC/parity bits are valid in any local instruction - * or data RAM. The normal reset vector sequence already takes care - * of initializing any local cache ECC/parity bits. - */ - .text - .align 4 - .global _xtos_memep_enable -_xtos_memep_enable: - abi_entry - -#if XCHAL_HAVE_MEM_ECC_PARITY - - // Local Memory ECC/Parity option initialization - // - // NOTE: We turn on exceptions on correctable errors and correct - // them in the memory error handler. - movi a2, MESR_ERRENAB | MESR_DATEXC | MESR_INSEXC - wsr a2, MESR - isync - -#endif /* XCHAL_HAVE_MEM_ECC_PARITY */ - - movi a2, 0 // successfully turned on what we could - abi_return - - .size _xtos_memep_enable, . - _xtos_memep_enable - diff --git a/src/arch/xtensa/xtos/memep-initrams.S b/src/arch/xtensa/xtos/memep-initrams.S deleted file mode 100644 index 8cc399e55c59..000000000000 --- a/src/arch/xtensa/xtos/memep-initrams.S +++ /dev/null @@ -1,91 +0,0 @@ -// memep-initrams.S -- Initialize local memory ECC/parity -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/memep-initrams.S#1 $ - -// Copyright (c) 2006-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - - /* - * void _xtos_memep_initrams(void); - * - * Most reset vectors initialize caches, leaving only the local memories - * (instruction and data RAMs) with potentially some words that have - * not been written to and thus have uninitialized ECC/parity bits. - * Loading such a word after enabling ECC/parity checking would result - * in an exception (or memory error reported in MESR). To avoid this, - * an application must either carefully avoid loading from uninitialized - * words, or ensure it writes to every instruction and data RAM word. - * The latter is what this function does. It reads and writes every - * word of every local instruction and data RAM. It should normally - * be called with interrupts disabled. An interrupt might come in - * between a load and store, in which case any modification made by the - * interrupt handler to that local memory location is lost when this - * function resumes and does the store. If no interrupt handler makes - * any persistent modification to local memories, disabling them around - * a call to this function may be unnecessary. - * - * On the simulator (ISS), everything comes up zeroed, so no there is - * no need for this initialization. - */ - .text - .align 4 - .global _xtos_memep_initrams -_xtos_memep_initrams: - abi_entry - - // Local Memory ECC/Parity option initialization -#if XCHAL_HAVE_MEM_ECC_PARITY && (XCHAL_NUM_DATARAM || XCHAL_NUM_INSTRAM /*|| XCHAL_NUM_URAM || XCHAL_NUM_XLMI*/) && !defined(SIMULATOR) - .section .rodata, "a" - .align 4 -.L_locmemep_start: -# if XCHAL_NUM_DATARAM >= 1 && XCHAL_DATARAM0_ECC_PARITY - .long XCHAL_DATARAM0_VADDR, XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE -# endif -# if XCHAL_NUM_DATARAM >= 2 && XCHAL_DATARAM1_ECC_PARITY - .long XCHAL_DATARAM1_VADDR, XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE -# endif -# if XCHAL_NUM_INSTRAM >= 1 && XCHAL_INSTRAM0_ECC_PARITY - .long XCHAL_INSTRAM0_VADDR, XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE -# endif -# if XCHAL_NUM_INSTRAM >= 2 && XCHAL_INSTRAM1_ECC_PARITY - .long XCHAL_INSTRAM1_VADDR, XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE -# endif -.L_locmemep_end: - .text - movi a5, .L_locmemep_start // start of table of local memory ranges - movi a6, .L_locmemep_end // end of table ... -2: l32i a3, a5, 0 // start of local memory - l32i a4, a5, 4 // end of local memory - addi a5, a5, 8 // (next entry in table) -1: l32i a2, a3, 0 // load and store every word of local memory... - s32i a2, a3, 0 // ... to initialize all parity and/or ECC bits - addi a3, a3, 4 - bltu a3, a4, 1b // loop until whole memory initialized - bltu a5, a6, 2b // loop until all memories initialized - // ECC/parity bits are now initialized, checking can be turned on. -#endif /* ECC/parity on instruction or data RAM(s) */ - - abi_return - - .size _xtos_memep_initrams, . - _xtos_memep_initrams - diff --git a/src/arch/xtensa/xtos/memerror-vector.S b/src/arch/xtensa/xtos/memerror-vector.S deleted file mode 100644 index 460b58ac997b..000000000000 --- a/src/arch/xtensa/xtos/memerror-vector.S +++ /dev/null @@ -1,581 +0,0 @@ -/* memerror-vector.S -- Memory Error Exception Vector and Handler */ - -/* $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/memerror-vector.S#1 $ */ - -/* - * Copyright (c) 2006-2013 Tensilica Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - - -/* - * This handler supports multiple user hooks to handle various cases. - * This is the list of available hooks: - * - * _xtos_merr_hook_fatal_dme -- fatal error, double memory exception - * _xtos_merr_hook_uncorrectable_local -- fatal error, uncorrectable error in IRAM/DRAM - * _xtos_merr_hook_uncor_dtag -- fatal error, uncorrectable error in dcache tag - * _xtos_merr_hook_uncor_dirty -- fatal error, uncorrectable error in dirty dcache data - * _xtos_merr_hook_icache_relock -- non-fatal, hook to relock icache - * _xtos_merr_hook_dcache_relock -- non-fatal, hook to relock dcache - * _xtos_merr_hook_nonfatal -- non-fatal, correctable error - */ - - -#include -#include - -#if XCHAL_HAVE_MEM_ECC_PARITY -# if defined(__SPLIT__vector) - - // Place this code in the memory error exception vector: - .begin literal_prefix .MemoryExceptionVector - .section .MemoryExceptionVector.text, "ax" - - .global _MemErrorVector - .align 4 -_MemErrorVector: -# if 0 /* XCHAL_HAVE_DEBUG */ - // Memory errors raise PS.INTLEVEL above DEBUGLEVEL, so - // break instructions have no effect within them (debug - // exceptions are masked). So leave commented out for now. - break 1, 5 // unhandled memory error exception -# endif - xsr.mesave a0 - jx a0 - - .size _MemErrorVector, . - _MemErrorVector - .text - .end literal_prefix - - -# elif defined(__SPLIT__handler) - -/* - * Some rules and assumptions: - * - * Anything that can interrupt this handler (e.g. NMI): - * - must not lock or unlock cache lines - */ - - -#define ICACHE_WAYWIDTH (XCHAL_ICACHE_SETWIDTH + XCHAL_ICACHE_LINEWIDTH) /* LICT's "iis" */ -#define DCACHE_WAYWIDTH (XCHAL_DCACHE_SETWIDTH + XCHAL_DCACHE_LINEWIDTH) /* LDCT's "dis" */ -/* NOTE: Memory ECC/parity is not supported on XLMI or on local ROMs: */ -#define HAVE_LOCAL_RAM (XCHAL_NUM_DATARAM || XCHAL_NUM_INSTRAM /*|| XCHAL_NUM_URAM || XCHAL_NUM_XLMI*/) - - -#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_WAYS > 1 && XCHAL_HAVE_PREFETCH - .comm _MemErrorSave, 12, 4 -#else - //.lcomm _MemErrorSave, 8 - .comm _MemErrorSave, 8, 4 -#endif - - .text - .align 4 - .global _MemErrorHandler -_MemErrorHandler: - hw_erratum_487_fix - rsr.mesr a0 - bbsi.l a0, MESR_DME_SHIFT, .L_fatal_dme -# if XCHAL_ICACHE_SIZE > 0 || XCHAL_DCACHE_SIZE > 0 - bbsi.l a0, MESR_MEMTYPE_SHIFT+1, .L_cache // branch if error on a cache -# endif - // Error in a local memory. 
-# if HAVE_LOCAL_RAM - bbsi.l a0, MESR_ERRTYPE_SHIFT, .L_uncorrectable_local - // Correctable error in a local memory (IRAM or DRAM). - // (MEVADDR has all 32 bits, so XSR preserves a register:) - xsr.mevaddr a2 - // Note: MEVADDR is always 4-byte aligned, - // so we can just do L32I/S32I to correct the error. - // However, that's not atomic, and NMI can store in between; - // that's usually a problem for D rather than I, avoid the - // issue using S32C1I if configured (else NMI must not write DataRAM!?!): -# if (XCHAL_HAVE_S32C1I && (XCHAL_NUM_DATARAM /*|| XCHAL_NUM_URAM || XCHAL_NUM_XLMI*/)) - bbci.l a0, MESR_MEMTYPE_SHIFT, .L_instram // branch if error on InstRAM - // Unfortunately we need 3 registers to do S32C1I (data,addr,SCOMPARE1) so - // we need to save to _MemErrorSave: - movi a0, _MemErrorSave - s32i a4, a0, 0 // save a4 - l32i a4, a2, 0 // load data (re-correct) - rsr.scompare1 a0 // save SCOMPARE1 - wsr.scompare1 a4 - s32c1i a4, a2, 0 // store if still contains same value (else other store corrected error) - movi a4, _MemErrorSave - wsr.scompare1 a0 // restore SCOMPARE1 - l32i a4, a4, 0 // restore a4 - j 2f -.L_instram: -# endif - l32i a0, a2, 0 // load data (re-correct) - s32i a0, a2, 0 // store data to correct ECC bits -2: xsr.mevaddr a2 -# endif /* HAVE_LOCAL_RAM */ - - - // Weak reference: if unresolved, links okay but with zero value: - // - // This hook must return using a RET instruction. It will receive - // the return address in a0, and the MESR register value in a2. - // The hook may use and trash a2 and a3, but all other registers - // must be preserved. - - .weak _xtos_merr_hook_nonfatal - -.L_done: - // Finished, now see if there is a user hook to call before returning - movi a0, _xtos_merr_hook_nonfatal // hook address - beqz a0, 1f // no hook, return - movi a0, _MemErrorSave // save area address - s32i a2, a0, 0 // save a2 - s32i a3, a0, 4 // save a3 - movi a0, _xtos_merr_hook_nonfatal // re-load hook address - rsr.mesr a2 // pass MESR value as arg - callx0 a0 // call user hook - movi a3, _MemErrorSave - l32i a2, a3, 0 // restore a2 - l32i a3, a3, 4 // restore a3 -1: movi a0, _MemErrorHandler // handler address - xsr.mesave a0 // restore a0 and MESAVE - rfme - - - // Weak reference: if unresolved, links okay but with zero value: - .weak _xtos_merr_hook_fatal_dme -.L_fatal_dme: - // Fatal (unrecoverable) error, double memory exception - movi a0, _xtos_merr_hook_fatal_dme -1: beqz a0, 1b // fatal double memory error, no hook, so infinite loop - jx a0 // jump to user hook, if present - - -# if HAVE_LOCAL_RAM - // Weak reference: if unresolved, links okay but with zero value: - .weak _xtos_merr_hook_uncorrectable_local -.L_uncorrectable_local: - // Fatal (unrecoverable) error in IRAM or DRAM: parity or uncorrectable ECC error - movi a0, _xtos_merr_hook_uncorrectable_local -1: beqz a0, 1b // fatal memory error, no hook provided, so infinite loop - jx a0 // jump to user hook, if present -# endif - - -# if XCHAL_ICACHE_SIZE > 0 || XCHAL_DCACHE_SIZE > 0 -.L_cache: - // Error in one of the caches. -# endif - -# if XCHAL_ICACHE_SIZE > 0 && XCHAL_HAVE_ICACHE_TEST -# if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_DCACHE_TEST - bbsi.l a0, MESR_MEMTYPE_SHIFT, .L_dcache // branch if data cache error -# endif - // Error in the instruction cache. - bbsi.l a0, MESR_ERRTYPE_SHIFT, .L_icache_noncorr // branch if uncorrectable - // Correctable error in the instruction cache. - xsr.mevaddr a2 - // TODO FIXME: remove these 5 lines if waynum is in MEVADDR!? 
by using III if tag and IHI otherwise!?!?!?: -# if XCHAL_ICACHE_WAYS > 1 - extui a0, a0, MESR_WAYNUM_SHIFT, 2 - slli a0, a0, ICACHE_WAYWIDTH - slli a2, a2, 32 - ICACHE_WAYWIDTH - srli a2, a2, 32 - ICACHE_WAYWIDTH - or a2, a2, a0 -# endif -# if XCHAL_ICACHE_LINE_LOCKABLE - // Preserve the lock bit. So check the tag... - lict a0, a2 // load i-cache tag - bbci.l a0, XCHAL_ICACHE_TAG_L_SHIFT, .L_icache_corr_unlocked // branch if unlocked - // Correctable error in a locked instruction cache line. - // Fix both tag and one word, quicker than figuring out whether error is in tag or data: - sict a0, a2 // fix tag - // TODO FIXME: on fetch wider than 32-bits, SICW might replicate its 32 bits to the - // whole fetch width rather than just write 32-bits, depending on how customer wires up - // I-cache RAMs. With ECC option and I-cache locking, they need 32-bit word write enables. - licw a0, a2 - sicw a0, a2 // fix data word - j .L_icache_done -.L_icache_corr_unlocked: - // We have to write the whole tag to avoid hitting an error here (if tag error). - // So use IIU (which also invalidates) not III (which reads L bit so can hit error). - iiu a2, 0 // unlock line ==> also invalidates! (I-side only) -# else - iii a2, 0 // invalidate line (whole set!) -# endif -.L_icache_done: - xsr.mevaddr a2 - j .L_done - -.L_icache_noncorr: - // Non-correctable error in the instruction cache. - bbsi.l a0, MESR_MEMTYPE_SHIFT+2, .L_icache_tag_noncorr // branch if tag error - // Non-correctable error in the instruction cache data. - // Just invalidate the line if we can. -# if XCHAL_ICACHE_LINE_LOCKABLE - // If locked, need a different fix sequence. - xsr.mevaddr a2 - -# if XCHAL_ICACHE_WAYS > 1 - // This sequence is shorter, but does not retain original MEVADDR so - // prevents subsequent use of instructions requiring a virtual address - // (such as LICW, IPFL, etc): -// extui a0, a0, MESR_WAYNUM_SHIFT, 2 -// slli a0, a0, ICACHE_WAYWIDTH -// slli a2, a2, 32 - ICACHE_WAYWIDTH -// srli a2, a2, 32 - ICACHE_WAYWIDTH -// or a2, a2, a0 - - extui a0, a0, MESR_WAYNUM_SHIFT, 2 // id of way with mem error - slli a0, a0, ICACHE_WAYWIDTH - xor a0, a2, a0 // xor corresponding bits of addr - extui a0, a0, ICACHE_WAYWIDTH, 2 // take 2 xor'ed way bits - or a2, a2, a0 // save them at bottom of addr - slli a0, a0, ICACHE_WAYWIDTH - xor a2, a2, a0 // and change 2 way bits of addr -# endif - lict a0, a2 - bbsi.l a0, XCHAL_ICACHE_TAG_L_SHIFT, .L_icache_locked_uncor // branch if locked - // Cache line is not locked, just invalidate: -# if XCHAL_ICACHE_WAYS > 1 - iii a2, 0 -# else - ihi a2, 0 -# endif - j .L_icache_done - - // NOTE: we don't use the LICW/SICW sequence below unless the line is locked, - // otherwise the i-cache line might get replaced between LICW and SICW - // (if we're not extremely careful), which would be disastrous. - // Also, for locked lines, LICW/SICW is much safer than IHU/IHI/IPFL - // because it doesn't leave a window where the line is unlocked; - // however, if the error is non-correctable, we have no choice. - -.L_icache_locked_uncor: - // If locked and uncorrectable however, the only recourse is relocking. - // So we need to recover the virtual address so we can do IPFL. - // Note: can't use MEPC instead of MEVADDR, because (a) it might not - // point to the correct cache line, and (b) it might be completely wrong - // in the case where the mem error happened e.g. during an LICW or IPFL. 
-# if XCHAL_ICACHE_WAYS > 1 - // Recover virtual address in a2: - extui a0, a2, 0, 2 // get saved xor'ed bits at bottom - slli a0, a0, ICACHE_WAYWIDTH // line them up - xor a2, a2, a0 // restore original MEVADDR -# endif - ihu a2, 0 // unlock line - ihi a2, 0 // invalidate line - ipfl a2, 0 // refetch-and-lock the line - j .L_icache_done -# else /* LOCKABLE */ - rsr.mevaddr a0 - ihi a0, 0 // invalidate that cache line - j .L_done -# endif /* LOCKABLE */ - -.L_icache_tag_noncorr: - // Non-correctable error in the instruction cache tag. - // Just invalidate the tag or the entire set. -# if XCHAL_ICACHE_LINE_LOCKABLE - // Note: - // With i-cache locking, IIU writes the entire tag without mem-error check, - // and III writes part of it (leaves lock bit alone) so can hit errors. - // Without i-cache locking, III writes the entire tag without mem-error check. - // (Original assumption was that SICT is needed.) -# if XCHAL_ICACHE_WAYS > 1 - // TODO FIXME: avoid this 8-line alternative if waynum is in MEVADDR!?: - xsr.mevaddr a2 - extui a0, a0, MESR_WAYNUM_SHIFT, 2 - slli a0, a0, ICACHE_WAYWIDTH - slli a2, a2, 32 - ICACHE_WAYWIDTH - srli a2, a2, 32 - ICACHE_WAYWIDTH - or a2, a2, a0 - iiu a2, 0 // unlock line ==> also invalidates! (I-side only) - xsr.mevaddr a2 -# else - rsr.mevaddr a0 - iiu a0, 0 // unlock line ==> also invalidates! (I-side only) -# endif - // If line was locked, can't recover lock state, need external info to recover. - // User can provide an assembler hook routine _xtos_merr_hook_icache_relock - // to relock the icache at the index in a2: - // - any number of lines might still be locked at that index, - // including all of them - // - no stack is provided, a0 must be used as starting point to - // load a save area and saved registers as necessary - // - unless routine just does ret (i.e. does not modify any - // register, only possible if it does nothing), it needs to - // return by restoring all registers it modified, ending with: - // movi a0, _MemErrorHandler - // xsr.mesave a0 - // rfme - // CAVEAT EMPTOR: this hook mechanism is subject to change. - .weak _xtos_merr_hook_icache_relock // if unresolved, links with zero value - movi a0, _xtos_merr_hook_icache_relock -1: beqz a0, 1b // if no hook to recover lock state on icache tag mem error, loop forever - callx0 a0 // invoke user hook to relock i-cache (index in MEVADDR) -# else - rsr.mevaddr a0 - iii a0, 0 // invalidate entire set -# endif - j .L_done -# endif /* have ICACHE */ - - -# if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_DCACHE_TEST -# if XCHAL_ICACHE_SIZE > 0 && XCHAL_HAVE_ICACHE_TEST -.L_dcache: -# endif - // Error in the data cache. -# if XCHAL_DCACHE_IS_WRITEBACK || XCHAL_DCACHE_LINE_LOCKABLE - bbsi.l a0, MESR_ERRTYPE_SHIFT, .L_dcache_noncorr // branch if uncorrectable - // Uncorrectable error on a writeback dcache might be unrecoverable: -# endif - bbsi.l a0, MESR_MEMTYPE_SHIFT+2, .L_dcache_tag // branch if tag error - // Error in the data cache data (correctable, or non-correctable in writethru+unlockable cache). - // MEVADDR always a real vaddr here; might point to cache-isolate mode area though. -# if XCHAL_DCACHE_LINE_LOCKABLE - // Correctable error on lockable dcache data. 
- // If locked, need to refetch the line (or load/store its contents, which is less safe): - xsr.mevaddr a2 -# if XCHAL_DCACHE_WAYS > 1 - // Need some extra computation to get the correct dcache way's tag: - movi a0, _MemErrorSave - s32i a4, a0, 0 // save a4 - s32i a5, a0, 4 // save a5 -# if XCHAL_HAVE_PREFETCH - s32i a6, a0, 8 // save a6 - movi a6, 0 - xsr.prefctl a6 // disable prefetch during LDCT (issue TENX-24760) -# endif - rsr.mesr a4 - extui a4, a4, MESR_WAYNUM_SHIFT, 2 - slli a4, a4, DCACHE_WAYWIDTH - slli a5, a2, 32 - DCACHE_WAYWIDTH - srli a5, a5, 32 - DCACHE_WAYWIDTH - add a4, a4, a5 - mov a5, a0 - ldct a0, a4 - l32i a4, a5, 0 // restore a4 -# if XCHAL_HAVE_PREFETCH - wsr.prefctl a6 // restore prefetch - l32i a6, a5, 8 // restore a6 -# endif - l32i a5, a5, 4 // restore a5 -# else -# if XCHAL_HAVE_PREFETCH - movi a0, _MemErrorSave - s32i a4, a0, 0 // save a4 - movi a4, 0 - xsr.prefctl a4 // disable prefetch during LDCT (issue TENX-24760) -# endif - ldct a0, a2 // invalidate and unlock that cache tag -# if XCHAL_HAVE_PREFETCH - wsr.prefctl a4 // restore prefetch - movi a4, _MemErrorSave - l32i a4, a4, 0 // restore a4 -# endif -# endif - // FIXME: if castout, a2 is a cache index (see PR 24103), from which - // we can construct a physical address! need that paddr reconstruction, - // and doesn't work with any address translation. -# if 0 /* translation */ - movi a4, _xtos_vmap_vaddr // FIXME: do we need two variables for full MMU? -1: beqz a4, 1b // if no vaddr to use, loop forever (FIXME: caxlt: could assume V==P) - rdtlb1 a5, a4 // save current contents - ... clear lower bits of a4 ... - xx = some function of a2 - wdtlb xx, a4 - a2 = virtual address, i.e. some function of a2 and a4 ... - ... do the sequence below ... - ... - wdtlb a5, a4 // restore TLB entry -# endif - // NOTE: the following sequence leaves the line temporarily unlocked, if locked. - // We assume NMI handlers don't lock lines or rely on their being locked. - // We could have used "l32i a0,a2,0; s32i a0,a2,0" but that's not atomic on the data. - dhu a2, 0 // unlock the cache line, if locked - dhwbi a2, 0 // writeback and invalidate cache line - bbci.l a0, XCHAL_DCACHE_TAG_L_SHIFT, 1f - dpfl a2, 0 // re-prefetch-and-lock the cache line -1: xsr.mevaddr a2 -# else /* LOCKABLE */ - // Error in unlockable data cache data (correctable, or non-correctable in writethru cache). - rsr.mevaddr a0 - // USELESS NOTE: if writethru dcache and NMI handlers don't store to this, we could use DHI instead: - // FIXME: if castout, a0 is a physical address! doesn't work with any address translation. - dhwbi a0, 0 // writeback (if correctable) and invalidate that cache line -# endif /* LOCKABLE */ - j .L_done - -.L_dcache_tag: - // Error in data cache tag (correctable, or non-correctable in writethru+unlockable cache). - // MEVADDR only contains cache index here (not waynum), don't expect a vaddr (the ISA - // says upper bits are undefined; actual hw does put a vaddr, but in future might not). 
- // Whether or not correctable, just invalidate the particular way's line: - xsr.mevaddr a2 - // NOTE: could remove these 5 lines if hw were designed with waynum in MEVADDR (but is not): -# if XCHAL_DCACHE_WAYS > 1 - extui a0, a0, MESR_WAYNUM_SHIFT, 2 - slli a0, a0, DCACHE_WAYWIDTH - slli a2, a2, 32 - DCACHE_WAYWIDTH - srli a2, a2, 32 - DCACHE_WAYWIDTH - or a2, a2, a0 -# endif -# if XCHAL_DCACHE_LINE_LOCKABLE -# if XCHAL_HAVE_PREFETCH - movi a0, _MemErrorSave - s32i a4, a0, 0 // save a4 - movi a4, 0 - xsr.prefctl a4 // disable prefetch during LDCT (issue TENX-24760) -# endif - ldct a0, a2 // invalidate and unlock that cache tag -# if XCHAL_HAVE_PREFETCH - wsr.prefctl a4 // restore prefetch - movi a4, _MemErrorSave - l32i a4, a4, 0 // restore a4 -# endif - bbci.l a0, XCHAL_DCACHE_TAG_L_SHIFT, 1f // branch if not locked - sdct a0, a2 // if locked, this safely writes whole tag -# endif -1: diwbi a2, 0 // writeback (if correctable) and invalidate the line - xsr.mevaddr a2 - j .L_done - - - -# if XCHAL_DCACHE_IS_WRITEBACK || XCHAL_DCACHE_LINE_LOCKABLE -.L_dcache_noncorr: - // Uncorrectable error on a (writeback and/or lockable) data cache. -# if XCHAL_DCACHE_IS_WRITEBACK - // On tag errors we don't know whether the line is dirty, so this is unrecoverable: - bbsi.l a0, MESR_MEMTYPE_SHIFT+2, .L_uncorrectable_dtag // branch if tag error - // Castouts are by definition dirty, uncorrectable errors on these are unrecoverable: - bbsi.l a0, MESR_ACCTYPE_SHIFT, .L_uncorrectable_dirty // branch if castout - // Note: could still be an error on dirty dcache data, also unrecoverable. -# else - bbsi.l a0, MESR_MEMTYPE_SHIFT+2, .L_dcache_tag_noncorr // branch if tag error -# endif - // Uncorrectable error in dcache data. - // May be dirty or locked, so get tag to find out. - xsr.mevaddr a2 -# if XCHAL_DCACHE_WAYS > 1 - extui a0, a0, MESR_WAYNUM_SHIFT, 2 // id of way with mem error - slli a0, a0, DCACHE_WAYWIDTH - xor a0, a2, a0 // xor corresponding bits of addr - extui a0, a0, DCACHE_WAYWIDTH, 2 // take 2 xor'ed way bits - or a2, a2, a0 // save them at bottom of addr - slli a0, a0, DCACHE_WAYWIDTH - xor a2, a2, a0 // and change 2 way bits of addr -# endif -# if XCHAL_HAVE_PREFETCH - movi a0, _MemErrorSave - s32i a4, a0, 0 // save a4 - movi a4, 0 - xsr.prefctl a4 // disable prefetch during LDCT (issue TENX-24760) -# endif - ldct a0, a2 // get dcache tag -# if XCHAL_HAVE_PREFETCH - wsr.prefctl a4 // restore prefetch - movi a4, _MemErrorSave - l32i a4, a4, 0 // restore a4 -# endif -# if XCHAL_DCACHE_IS_WRITEBACK - bbsi.l a0, XCHAL_DCACHE_TAG_D_SHIFT, .L_uncorrectable_dirty_2 // branch if dirty -# endif - // Data cache line is clean. -# if XCHAL_DCACHE_LINE_LOCKABLE - bbsi.l a0, XCHAL_DCACHE_TAG_L_SHIFT, .L_dcache_nc_locked -# endif - // Data cache line is clean and unlocked. Just invalidate it. - // FIXME: any stores to this line by an NMI handler will be lost. - // On the other hand, if we use DHWBI, any stores by an NMI handler - // that don't happen to fix the error result in an unrecoverable castout. 
- // -# if XCHAL_DCACHE_WAYS > 1 - // Recover virtual address in a2: - extui a0, a2, 0, 2 // get saved xor'ed bits at bottom - slli a0, a0, DCACHE_WAYWIDTH // line them up - xor a2, a2, a0 // restore original MEVADDR -# endif - dhi a2, 0 // invalidate that data cache line - xsr.mevaddr a2 - j .L_done - -# if XCHAL_DCACHE_LINE_LOCKABLE -.L_dcache_nc_locked: -# if XCHAL_DCACHE_WAYS > 1 - // Recover virtual address in a2: - extui a0, a2, 0, 2 // get saved xor'ed bits at bottom - slli a0, a0, DCACHE_WAYWIDTH // line them up - xor a2, a2, a0 // restore original MEVADDR -# endif - // Unlock, invalidate, and relock it: - dhu a2, 0 // unlock that data cache line - dhi a2, 0 // invalidate that data cache line - dpfl a2, 0 // prefetch-and-lock the line again - xsr.mevaddr a2 - j .L_done -# endif - -# if XCHAL_DCACHE_IS_WRITEBACK - // Weak reference: if unresolved, links okay but with zero value: - .weak _xtos_merr_hook_uncor_dtag -.L_uncorrectable_dtag: - // Fatal (unrecoverable) error in dcache tag (maybe dirty): parity or uncorrectable ECC error - movi a0, _xtos_merr_hook_uncor_dtag -1: beqz a0, 1b // fatal non-corr dcache tag, no hook, so infinite loop - jx a0 // jump to user hook, if present - - // Weak reference: if unresolved, links okay but with zero value: - .weak _xtos_merr_hook_uncor_dirty -.L_uncorrectable_dirty_2: - xsr.mevaddr a2 -.L_uncorrectable_dirty: - // Fatal (unrecoverable) error, parity or non-correctable ECC error on dirty cache data - movi a0, _xtos_merr_hook_uncor_dirty -1: beqz a0, 1b // fatal non-corr dirty cache line, no hook, so infinite loop - jx a0 // jump to user hook, if present -# else -.L_dcache_tag_noncorr: - // Uncorrectable error on a lockable writethru data cache tag. - // We have to invalidate the line, but that way we lose the lock bit. - // Provide a hook to relock if necessary (using knowledge outside this module - // about what needs to be locked). See _xtos_merr_hook_icache_relock for details. - // CAVEAT EMPTOR: this hook mechanism is subject to change. - .weak _xtos_merr_hook_dcache_relock // if unresolved, links with zero value - movi a0, _xtos_merr_hook_dcache_relock -1: beqz a0, 1b // if no hook to recover lock state on dcache tag mem error, loop forever - callx0 a0 // invoke user hook to relock d-cache (index in MEVADDR) - j .L_done -# endif - -# endif /* DCACHE IS WRITEBACK || LINE_LOCKABLE */ - -# endif /* have DCACHE */ - - .size _MemErrorHandler, . - _MemErrorHandler - - - -# endif /* splitting */ -#endif /* XCHAL_HAVE_MEM_ECC_PARITY */ - diff --git a/src/arch/xtensa/xtos/nmi-vector.S b/src/arch/xtensa/xtos/nmi-vector.S deleted file mode 100644 index 637dcda96940..000000000000 --- a/src/arch/xtensa/xtos/nmi-vector.S +++ /dev/null @@ -1,60 +0,0 @@ -// nmi-vector.S -- Standalone NMI Interrupt Vector/Handler -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/nmi-vector.S#1 $ - -// Copyright (c) 2003, 2006, 2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// The NMI exception vector handles non-maskable interrupts. - -#include -#include - -#if XCHAL_HAVE_NMI - - .begin literal_prefix .NMIExceptionVector - .section .NMIExceptionVector.text, "ax" - - .align 4 - .global _NMIExceptionVector -_NMIExceptionVector: - -// Insert any custom NMI handling code here. -// For example: -// writesr excsave XCHAL_NMILEVEL a0 -// movi a0, ...address of some save area specific to this code... -// s32i a1, a0, 0 // save whatever registers are needed -// : -// do something useful ... -// : -// l32i a1, a0, 0 // restore whatever registers were saved -// readsr excsave XCHAL_NMILEVEL a0 -// -// This default NMI handler does not do anything. It just returns -// immediately upon any occurrence of a non-maskable interrupt. - - rfi XCHAL_NMILEVEL - - .size _NMIExceptionVector, . - _NMIExceptionVector - .text - .end literal_prefix - -#endif /* XCHAL_HAVE_NMI */ - diff --git a/src/arch/xtensa/xtos/null-syscall.S b/src/arch/xtensa/xtos/null-syscall.S deleted file mode 100644 index fb53cf948865..000000000000 --- a/src/arch/xtensa/xtos/null-syscall.S +++ /dev/null @@ -1,38 +0,0 @@ -// null-syscall.S - Stub for Unused SYSCALL Handler -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/null-syscall.S#1 $ - -// Copyright (c) 2006-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - // Redirect SYSCALL exceptions as not handled, when - // that functionality is not needed. Done this way, a user can - // just specify this small handler to override the default one. - - .text - .weak xtos_unhandled_exception - .global _xtos_syscall_handler -_xtos_syscall_handler: - movi a3, xtos_unhandled_exception -1: beqz a3, 1b - jx a3 - .size _xtos_syscall_handler, . 
- _xtos_syscall_handler - diff --git a/src/arch/xtensa/xtos/null-vectors.S b/src/arch/xtensa/xtos/null-vectors.S deleted file mode 100644 index 0bb274d690c1..000000000000 --- a/src/arch/xtensa/xtos/null-vectors.S +++ /dev/null @@ -1,184 +0,0 @@ -// null-vectors.S - Stubs for Unused Vectors and Handlers -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/null-vectors.S#1 $ - -// Copyright (c) 2006-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -// NOTE: The simulator version of this file is currently not used, -// i.e. the SIMULATOR macro is never defined when assembling this file. -// The relevant simulator code is left here for illustrative purposes only. - - -#include -#include "xtos-internal.h" -#ifdef SIMULATOR -#include -#endif - - // These are just tiny non-functional vectors and handlers for when - // their functionality is not being used. They just try to signal - // the debugger that an unhandled exception or interrupt occurred, - // and otherwise just spin in a loop. - // - // For interrupts levels above DEBUGLEVEL, lowering PS.INTLEVEL - // for break to work is tricky, and not always possible in a - // generic fashion without interfering with normal program execution. - // So for now we don't do it. - - -#if defined(__SPLIT__user) - -# if XCHAL_HAVE_EXCEPTIONS - .begin literal_prefix .UserExceptionVector - .section .UserExceptionVector.text, "ax" - .align 4 - .global _UserExceptionVector -_UserExceptionVector: -# if XCHAL_HAVE_DEBUG - break 1, 1 // unexpected user-vectored general exception -# endif -1: j 1b // infinite loop - unexpected user-vectored exception - .size _UserExceptionVector, . - _UserExceptionVector - .end literal_prefix -# endif - -#elif defined(__SPLIT__level1int) - - .text - .global _xtos_l1int_handler -_xtos_l1int_handler: - movi a3, xtos_unhandled_exception - jx a3 - .size _xtos_l1int_handler, . - _xtos_l1int_handler - -#elif defined(__SPLIT__level2) - -# if (XCHAL_NUM_INTLEVELS >= 2) && (XCHAL_DEBUGLEVEL != 2) - .begin literal_prefix .Level2InterruptVector - .section .Level2InterruptVector.text, "ax" - .align 4 - .global _Level2Vector -_Level2Vector: -# if XCHAL_HAVE_DEBUG && (XCHAL_DEBUGLEVEL > 2) - break 1, 2 // unexpected high-priority interrupt -# elif defined(SIMULATOR) - wsr a2, EXCSAVE_2 - movi a2, SYS_unhandled_highpri_interrupt - simcall // let simulator/debugger report unhandled level-2 interrupt -# endif -1: j 1b // infinite loop - unexpected level-2 interrupt - .size _Level2Vector, . 
- _Level2Vector - .text - .end literal_prefix -# endif /* level 2 */ - - -#elif defined(__SPLIT__level3) - -# if (XCHAL_NUM_INTLEVELS >= 3) && (XCHAL_DEBUGLEVEL != 3) - .begin literal_prefix .Level3InterruptVector - .section .Level3InterruptVector.text, "ax" - .align 4 - .global _Level3Vector -_Level3Vector: -# if XCHAL_HAVE_DEBUG && (XCHAL_DEBUGLEVEL > 3) - break 1, 2 // unexpected high-priority interrupt -# elif defined(SIMULATOR) - wsr a2, EXCSAVE_3 - movi a2, SYS_unhandled_highpri_interrupt - simcall // let simulator/debugger report unhandled level-3 interrupt -# endif -1: j 1b // infinite loop - unexpected level-3 interrupt - .size _Level3Vector, . - _Level3Vector - .text - .end literal_prefix -# endif /* level 3 */ - - -#elif defined(__SPLIT__level4) - -# if (XCHAL_NUM_INTLEVELS >= 4) && (XCHAL_DEBUGLEVEL != 4) - .begin literal_prefix .Level4InterruptVector - .section .Level4InterruptVector.text, "ax" - .align 4 - .global _Level4Vector -_Level4Vector: -# if XCHAL_HAVE_DEBUG && (XCHAL_DEBUGLEVEL > 4) - break 1, 2 // unexpected high-priority interrupt -# elif defined(SIMULATOR) - wsr a2, EXCSAVE_4 - movi a2, SYS_unhandled_highpri_interrupt - simcall // let simulator/debugger report unhandled level-4 interrupt -# endif -1: j 1b // infinite loop - unexpected level-4 interrupt - .size _Level4Vector, . - _Level4Vector - .text - .end literal_prefix -# endif /* level 4 */ - - -#elif defined(__SPLIT__level5) - -# if (XCHAL_NUM_INTLEVELS >= 5) && (XCHAL_DEBUGLEVEL != 5) - .begin literal_prefix .Level5InterruptVector - .section .Level5InterruptVector.text, "ax" - .align 4 - .global _Level5Vector -_Level5Vector: -# if XCHAL_HAVE_DEBUG && (XCHAL_DEBUGLEVEL > 5) - break 1, 2 // unexpected high-priority interrupt -# elif defined(SIMULATOR) - wsr a2, EXCSAVE_5 - movi a2, SYS_unhandled_highpri_interrupt - simcall // let simulator/debugger report unhandled level-5 interrupt -# endif -1: j 1b // infinite loop - unexpected level-5 interrupt - .size _Level5Vector, . - _Level5Vector - .text - .end literal_prefix -# endif /* level 5 */ - - -#elif defined(__SPLIT__level6) - -# if (XCHAL_NUM_INTLEVELS >= 6) && (XCHAL_DEBUGLEVEL != 6) - .begin literal_prefix .Level6InterruptVector - .section .Level6InterruptVector.text, "ax" - .align 4 - .global _Level6Vector -_Level6Vector: -# if XCHAL_HAVE_DEBUG && (XCHAL_DEBUGLEVEL > 6) - break 1, 2 // unexpected high-priority interrupt -# elif defined(SIMULATOR) - wsr a2, EXCSAVE_6 - movi a2, SYS_unhandled_highpri_interrupt - simcall // let simulator/debugger report unhandled level-6 interrupt -# endif -1: j 1b // infinite loop - unexpected level-6 interrupt - .size _Level6Vector, . - _Level6Vector - .text - .end literal_prefix -# endif /* level 6 */ - - -#endif /* split */ - diff --git a/src/arch/xtensa/xtos/reset-unneeded.S b/src/arch/xtensa/xtos/reset-unneeded.S deleted file mode 100644 index e662a5856a75..000000000000 --- a/src/arch/xtensa/xtos/reset-unneeded.S +++ /dev/null @@ -1,156 +0,0 @@ -// reset-unneeded.S -- Optional Extraneous Reset Code -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/reset-unneeded.S#1 $ - -// Copyright (c) 2002-2006 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// This file is meant to be included by another, e.g. crt1-***.S . -// The code it contains is generally not needed, so is kept in a -// separate file for clarity of other code. - -#if XTOS_RESET_UNNEEDED - /* - * Reset registers that don't really need to be reset, - * but may provide more predictability when buggy code - * relies on uninitialized state. It might also clear - * "X"s a bit earlier in hardware simulations. - * - * NOTE: This code is by no means exhaustive. - * More processor registers/states could be reset if desired. - * This is just an example. - * - * ASSUMPTION: a0 is still zero at this point. - */ - - // Interrupt initialization. - // Because INTENABLE is cleared by the reset vector, clearing the - // interrupt-pending register should not be needed. This assumes - // that any application setting up an interrupt will register and - // clear it before enabling it, which is the recommended sequence. - // -#if XCHAL_HAVE_INTERRUPTS && (XCHAL_INTCLEARABLE_MASK != 0) && !XCHAL_HAVE_FULL_RESET - movi a2, XCHAL_INTCLEARABLE_MASK - wsr a2, INTCLEAR // clear software and edge-trig ints -#endif - - // Timer initialization (not strictly required, but sometimes helpful) - .macro reset_timer num - wsr a0, CCOMPARE_0 + \num - .endm - iterate 0, XCHAL_NUM_TIMERS-1, reset_timer - -# if XCHAL_HAVE_WINDOWED - // Windowed address register init -- initialize entire physical AR file - movi a0, XCHAL_NUM_AREGS/8 // number of 8-register chunks -arloop: - addi a8, a0, -1 // countdown into next chunk's a0 - movi a0, 0 - movi a1, 0 - movi a2, 0 - movi a3, 0 - movi a4, 0 - movi a5, 0 - movi a6, 0 - movi a7, 0 - rotw 2 // rotate to next chunk - bnez a0, arloop - // NOTE: WINDOWBASE is back to zero at this point. -# else /* XCHAL_HAVE_WINDOWED */ - // Non-windowed address register init - movi a1, 0 - movi a2, 0 - movi a3, 0 - movi a4, 0 - movi a5, 0 - movi a6, 0 - movi a7, 0 - movi a8, 0 - movi a9, 0 - movi a10, 0 - movi a11, 0 - movi a12, 0 - movi a13, 0 - movi a14, 0 - movi a15, 0 -# endif /* XCHAL_HAVE_WINDOWED */ - // Now all address registers are zero. - - // Initialize LBEG, LEND, and LCOUNT. 
-# if XCHAL_HAVE_LOOPS - wsr a0, LCOUNT // note: LCOUNT gets cleared by processor reset - wsr a0, LBEG - wsr a0, LEND -# endif - -# if XCHAL_HAVE_DEBUG - .macro reset_dbreaka num - wsr a0, DBREAKA + \num - .endm - .macro reset_ibreaka num - wsr a0, IBREAKA + \num - .endm - iterate 0, XCHAL_NUM_DBREAK-1, reset_dbreaka - iterate 0, XCHAL_NUM_IBREAK-1, reset_ibreaka -# endif - - // SAR initialization - ssai 0 - - // Exception initialization -# if XCHAL_HAVE_EXCEPTIONS - wsr a0, EPC+1 - wsr a0, EXCSAVE+1 - wsr a0, EXCCAUSE -# endif - -# if XCHAL_HAVE_HIGHLEVEL_INTERRUPTS - .macro reset_int num - wsr a0, EPC + \num - wsr a0, EPS + \num - wsr a0, EXCSAVE + \num - .endm - iterate 2, XCHAL_NUM_INTLEVELS, reset_int -# endif - - // Booleans initialization -# if XCHAL_HAVE_BOOLEANS - wsr a0, BR -# endif - - // MAC16 initialization -# if XCHAL_HAVE_MAC16 - wsr a0, ACCLO - wsr a0, ACCHI - wsr a0, M0 - wsr a0, M1 - wsr a0, M2 - wsr a0, M3 -# endif - - // OCD initialization -# if XCHAL_HAVE_OCD - wsr a0, DDR -# endif - - isync // wait for all the above to take effect - -#endif /* XTOS_RESET_UNNEEDED */ - diff --git a/src/arch/xtensa/xtos/reset-vector.S b/src/arch/xtensa/xtos/reset-vector.S deleted file mode 100644 index 7848a79b748f..000000000000 --- a/src/arch/xtensa/xtos/reset-vector.S +++ /dev/null @@ -1,678 +0,0 @@ -// reset-vector.S -- Xtensa Reset Vector -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/reset-vector.S#1 $ - -// Copyright (c) 1999-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM -#include -#endif -#include -#include -#include -#include -#include -#include -#include /* for XSHAL_USE_ABSOLUTE_LITERALS only */ -#include -#include "xtos-internal.h" - -#if XCHAL_HAVE_MPU -/* for mpu_write_map opcode */ -#include -#endif - -// The following reset vector avoids initializing certain registers already -// initialized by processor reset. But it does initialize some of them -// anyway, for minimal support of warm restart (restarting in software by -// jumping to the reset vector rather than asserting hardware reset). 
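As context for the warm-restart support mentioned in the comment above: restarting in software simply means transferring control back to the reset vector rather than asserting hardware reset. A minimal C sketch of that idea, assuming the _ResetVector symbol is reachable from C (illustrative only, not code from this tree, and omitting the interrupt/cache housekeeping a real warm restart would need):

extern void _ResetVector(void);     /* entry point defined by the reset vector */

/* Warm restart: re-enter the reset vector from software. Never returns. */
static void warm_restart(void)
{
	void (*reset)(void) = _ResetVector;

	reset();
}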
- - - .begin literal_prefix .ResetVector - .section .ResetVector.text, "ax" - - .align 4 - .global _ResetVector -_ResetVector: - -#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE - // NOTE: - // - // IMPORTANT: If you move the _ResetHandler portion to a section - // other than .ResetVector.text that is outside the range of - // the reset vector's 'j' instruction, the _ResetHandler symbol - // and a more elaborate j/movi/jx sequence are needed in - // .ResetVector.text to dispatch to the new location. - -#if CONFIG_XT_HAVE_RESET_VECTOR_ROM - j _ResetHandler -#else - // This is our VM ROM, it simply jumps to the reset handler. - j .sram_jump // jump over the literals - - .align 4 - .literal_position // tells the assembler/linker to place literals here - -_reset_sram: - .word _ResetHandler - .align 4 -.sram_jump: - l32r a0, _reset_sram // load SRAM reset handler address - jx a0 // jump to the handler -#endif - .size _ResetVector, . - _ResetVector - -# if XCHAL_HAVE_HALT - // Xtensa TX: reset vector segment is only 4 bytes, so must place the - // unpacker code elsewhere in the memory that contains the reset vector. -# if XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTRAM0_VADDR - .section .iram0.text, "ax" -# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_INSTROM0_VADDR - .section .irom0.text, "ax" -# elif XCHAL_RESET_VECTOR_VADDR == XCHAL_URAM0_VADDR - .section .uram0.text, "ax" -# else -# warning "Xtensa TX reset vector not at start of iram0, irom0, or uram0 -- ROMing LSPs may not work" - .text -# endif -# endif - - .extern __memctl_default - -#if CONFIG_XT_BOOT_LOADER || CONFIG_VM_ROM - .section .ResetHandler.text, "ax" - j _ResetHandler -#endif - .align 4 - .literal_position // tells the assembler/linker to place literals here - - // For MPU empty background map -- see XCHAL_HAVE_MPU code further below. - // Cannot put this in .rodata (not unpacked before MPU init). -# if XCHAL_HAVE_MPU && XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2 - .global _xtos_mpu_attribs - .align 4 -_xtos_mpu_attribs: - .word 0x00006000+XCHAL_MPU_ENTRIES-8 // Illegal (---) - .word 0x000F7700+XCHAL_MPU_ENTRIES-8 // Writeback (rwx Cacheable Non-shareable wb rd-alloc wr-alloc) - .word 0x000D5700+XCHAL_MPU_ENTRIES-8 // WBNA (rwx Cacheable Non-shareable wb rd-alloc) - .word 0x000C4700+XCHAL_MPU_ENTRIES-8 // Writethru (rwx Cacheable Non-shareable wt rd-alloc) - .word 0x00006700+XCHAL_MPU_ENTRIES-8 // Bypass (rwx Device non-interruptible system-shareable) -# endif - - .align 4 - .global _ResetHandler -_ResetHandler: -#endif - -#if !XCHAL_HAVE_HALT - - /* - * Even if the processor supports the non-PC-relative L32R option, - * it will always start up in PC-relative mode. We take advantage of - * this, and use PC-relative mode at least until we're sure the .lit4 - * section is in place (which is sometimes only after unpacking). - */ - .begin no-absolute-literals - - // If we have dynamic cache way support, init the caches as soon - // as we can, which is now. Except, if we are waking up from a - // PSO event, then we need to do this slightly later. - -#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS -# if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION - // Do this later on in the code -- see below -# else - movi a0, __memctl_default - wsr.memctl a0 -# endif -#endif - - // If we have PSO support, then we must check for a warm start with - // caches left powered on. If the caches had been left powered on, - // we must restore the state of MEMCTL to the saved state if any. 
- // Note that MEMCTL may not be present depending on config. - -#if XCHAL_HAVE_PSO_CDM && !XCHAL_HAVE_PSO_FULL_RETENTION - movi a2, XDM_MISC_PWRSTAT // Read PWRSTAT - movi a3, _xtos_pso_savearea // Save area address - retained for later - movi a5, CORE_STATE_SIGNATURE // Signature for compare - retained for later - rer a7, a2 // PWRSTAT value - retained for later - extui a4, a7, 1, 2 // Now bottom 2 bits are core wakeup and cache power lost - bnei a4, 1, .Lcold_start // a4==1 means PSO wakeup, caches did not lose power - l32i a4, a3, CS_SA_signature // Load save area signature field - sub a4, a4, a5 - bnez a4, .Lcold_start // If signature mismatch then do cold start -#if XCHAL_USE_MEMCTL - l32i a4, a3, CS_SA_memctl // Load saved MEMCTL value - movi a0, ~MEMCTL_INV_EN - and a0, a4, a0 // Clear invalidate bit - wsr.memctl a0 -#endif - j .Lwarm_start - -.Lcold_start: - -#if XCHAL_HAVE_ICACHE_DYN_WAYS || XCHAL_HAVE_DCACHE_DYN_WAYS - // Enable and invalidate all ways of both caches. If there is no - // dynamic way support then this write will have no effect. - - movi a0, __memctl_default - wsr.memctl a0 -#endif - -.Lwarm_start: - -#endif - - movi a0, 0 // a0 is always 0 in this code, used to initialize lots of things - -#if XCHAL_HAVE_INTERRUPTS // technically this should be under !FULL_RESET, assuming hard reset - wsr.intenable a0 // make sure that interrupts are shut off (*before* we lower PS.INTLEVEL and PS.EXCM!) -#endif - -#if !XCHAL_HAVE_FULL_RESET - -#if XCHAL_HAVE_CCOUNT && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0) /* pre-LX2 cores only */ - wsr.ccount a0 // not really necessary, but nice; best done very early -#endif - - // For full MMU configs, put page table at an unmapped virtual address. - // This ensures that accesses outside the static maps result - // in miss exceptions rather than random behaviour. - // Assumes XCHAL_SEG_MAPPABLE_VADDR == 0 (true in released MMU). -#if XCHAL_ITLB_ARF_WAYS > 0 || XCHAL_DTLB_ARF_WAYS > 0 - wsr.ptevaddr a0 -#endif - - // Debug initialization - // - // NOTE: DBREAKCn must be initialized before the combination of these two things: - // any load/store, and a lowering of PS.INTLEVEL below DEBUG_LEVEL. - // The processor already resets IBREAKENABLE appropriately. - // -#if XCHAL_HAVE_DEBUG -# if XCHAL_NUM_DBREAK -# if XCHAL_NUM_DBREAK >= 2 - wsr.dbreakc1 a0 -# endif - wsr.dbreakc0 a0 - dsync // wait for WSRs to DBREAKCn to complete -# endif - -# if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RA_2004_1 /* pre-LX cores only */ - // Starting in Xtensa LX, ICOUNTLEVEL resets to zero (not 15), so no need to initialize it. - // Prior to that we do, otherwise we get an ICOUNT exception, 2^32 instructions after reset. - rsr.icountlevel a2 // are we being debugged? (detected by ICOUNTLEVEL not 15, or dropped below 12) - bltui a2, 12, 1f // if so, avoid initializing ICOUNTLEVEL which drops single-steps through here - wsr.icountlevel a0 // avoid ICOUNT exceptions - isync // wait for WSR to ICOUNTLEVEL to complete -1: -# endif -#endif - -#endif /* !XCHAL_HAVE_FULL_RESET */ - -#if XCHAL_HAVE_ABSOLUTE_LITERALS - // Technically, this only needs to be done under !FULL_RESET, assuming hard reset: - wsr.litbase a0 - rsync -#endif - -#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION - // If we're powering up from a temporary power shut-off (PSO), - // restore state saved just prior to shut-off. 
Note that the - // MEMCTL register was already restored earlier, and as a side - // effect, registers a3, a5, a7 are now preloaded with values - // that we will use here. - // a3 - pointer to save area base address (_xtos_pso_savearea) - // a5 - saved state signature (CORE_STATE_SIGNATURE) - // a7 - contents of PWRSTAT register - - l32i a4, a3, CS_SA_signature // load save area signature - sub a4, a4, a5 // compare signature with expected one -# if XTOS_PSO_TEST - movi a7, PWRSTAT_WAKEUP_RESET // pretend PSO warm start with warm caches -# endif - bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, 1f // wakeup from PSO? (branch if not) - // Yes, wakeup from PSO. Check whether state was properly saved. - addi a5, a7, - PWRSTAT_WAKEUP_RESET // speculatively clear PSO-wakeup bit - movnez a7, a5, a4 // if state not saved (corrupted?), mark as cold start - bnez a4, 1f // if state not saved, just continue with reset - // Wakeup from PSO with good signature. Now check cache status: - bbci.l a7, PWRSTAT_CACHES_LOST_POWER_SHIFT, .Lpso_restore // if caches warm, restore now - // Caches got shutoff. Continue reset, we'll end up initializing caches, and check again later for PSO. -# if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I - j .Ldonesync // skip reset sync, only done for cold start -# endif -1: // Cold start. (Not PSO wakeup.) Proceed with normal full reset. -#endif - -#if XCHAL_HAVE_PRID && XCHAL_HAVE_S32C1I - /* Core 0 initializes the XMP synchronization variable, if present. This operation needs to - happen as early as possible in the startup sequence so that the other cores can be released - from reset. */ - .weak _ResetSync - movi a2, _ResetSync // address of sync variable - rsr.prid a3 // core and multiprocessor ID - extui a3, a3, 0, 8 // extract core ID (FIXME: need proper constants for PRID bits to extract) - beqz a2, .Ldonesync // skip if no sync variable - bnez a3, .Ldonesync // only do this on core 0 - s32i a0, a2, 0 // clear sync variable -.Ldonesync: -#endif -#if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MP_RUNSTALL - /* On core 0, this releases other cores. On other cores this has no effect, because - runstall control is unconnected. */ - movi a2, XER_MPSCORE - wer a0, a2 -#endif - - /* - * For processors with relocatable vectors, apply any alternate - * vector base given to xt-genldscripts, which sets the - * _memmap_vecbase_reset symbol accordingly. - */ -#if XCHAL_HAVE_VECBASE - movi a2, _memmap_vecbase_reset /* note: absolute symbol, not a ptr */ - wsr.vecbase a2 -#endif - -#if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) /* have ATOMCTL ? */ -# if XCHAL_DCACHE_IS_COHERENT - movi a3, 0x25 /* MX -- internal for writeback, RCW otherwise */ -# else - movi a3, 0x15 /* non-MX -- always RCW */ -# endif - wsr.atomctl a3 -#endif - -#if XCHAL_HAVE_INTERRUPTS && XCHAL_HAVE_DEBUG - rsil a2, 1 // lower PS.INTLEVEL here to make reset vector easier to debug -#endif - - /* If either of the caches does not have dynamic way support, then - * use the old (slow) method to init them. If the cache is absent - * the macros will expand to empty. - */ -#if ! XCHAL_HAVE_ICACHE_DYN_WAYS - icache_reset a2, a3 -#endif -#if ! XCHAL_HAVE_DCACHE_DYN_WAYS - dcache_reset a2, a3 -#endif - -#if XCHAL_HAVE_PSO_CDM && ! XCHAL_HAVE_PSO_FULL_RETENTION - // Here, a7 still contains status from the power status register, - // or zero if signature check failed. - bbci.l a7, PWRSTAT_WAKEUP_RESET_SHIFT, .Lcoldstart // wakeup from PSO with good signature? - // Yes, wakeup from PSO. 
Caches had been powered down, now are initialized. -.Lpso_restore: - // Assume memory still initialized, so all code still unpacked etc. - // So we can just jump/call to relevant state restore code (wherever located). - movi a2, 0 // make shutoff routine return zero - movi a3, _xtos_pso_savearea - // Here, as below for _start, call0 is used as an unlimited-range jump. - call0 _xtos_core_restore_nw - // (does not return) -.Lcoldstart: -#endif - -#if XCHAL_HAVE_PREFETCH - /* Enable cache prefetch if present. */ - movi.n a2, 68 - wsr a2, PREFCTL -#endif - - /* - * Now setup the memory attributes. On some cores this "enables" caches. - * We do this ahead of unpacking, so it can proceed more efficiently. - * - * The _memmap_cacheattr_reset symbol's value (address) is defined - * by the LSP's linker script, as generated by xt-genldscripts. - * If defines 4-bit attributes for eight 512MB regions. - * - * (NOTE: for cores with the older MMU v1 or v2, or without any memory - * protection mechanism, the following code has no effect.) - */ -#if XCHAL_HAVE_MPU - // If there is a user-provided MPU table, then we will program the MPU - // with it now. Can't call xthal_write_map_raw() because code sections - // haven't been unpacked yet. For romable images, the MPU table values - // and the table size must reside in a section that does not need to be - // unpacked (.ResetHandler.text or .srom.text). - // NOTE: This will set CACHEADRDIS to all zeros, because computing a - // useful nonzero value from the user settings is too complex and slow - // to implement here. - - .weak __xt_mpu_init_table // Table of MPU entries - .weak __xt_mpu_init_table_size // Number of entries in table - - movi a2, __xt_mpu_init_table // non-zero if user defined - movi a3, __xt_mpu_init_table_size // non-zero if user defined - beqz a2, .Lno_user_mpu - beqz a3, .Lno_user_mpu - l32i a3, a3, 0 - beqz a3, .Lno_user_mpu // skip if size = 0 - mpu_write_map a2, a3, a12, a13, a14, a15 - j .Lno_default_mpu - -.Lno_user_mpu: - // If there's an empty background map, setup foreground maps to mimic - // region protection. - - /* If there's an empty background map, setup foreground maps to mimic region protection: */ -# if XCHAL_MPU_ENTRIES >= 8 && XCHAL_MPU_BACKGROUND_ENTRIES <= 2 - // We assume reset state: all MPU entries zeroed and disabled. - // Otherwise we'd need a loop to zero everything. - - movi a2, _memmap_cacheattr_reset // note: absolute symbol, not a ptr - movi a3, _xtos_mpu_attribs // see literal area at start of reset vector - movi a4, 0x20000000 // 512 MB delta - movi a6, 8 - movi a7, 1 // MPU entry vaddr 0, with valid bit set - movi a9, 0 // cacheadrdis value - wsr.cacheadrdis a9 // enable everything temporarily while MPU updates - - // Write eight MPU entries, from the last one going backwards (entries n-1 thru n-8) - // -2: extui a8, a2, 28, 4 // get next attribute nibble (msb first) - extui a5, a8, 0, 2 // lower two bit indicate whether cached - slli a9, a9, 1 // add a bit to cacheadrdis... - addi a10, a9, 1 // set that new bit if... - moveqz a9, a10, a5 // ... 
that region is non-cacheable - addx4 a5, a8, a3 // index into _xtos_mpu_attribs table - addi a8, a8, -5 // make valid attrib indices negative - movgez a5, a3, a8 // if not valid attrib, use Illegal - l32i a5, a5, 0 // load access rights, memtype from table entry - slli a2, a2, 4 - sub a7, a7, a4 // next 512MB region (last to first) - addi a6, a6, -1 - add a5, a5, a6 // add the index - wptlb a5, a7 // write the MPU entry - bnez a6, 2b // loop until done -# else - movi a9, XCHAL_MPU_BG_CACHEADRDIS // default value of CACHEADRDIS for bgnd map -# endif - wsr.cacheadrdis a9 // update cacheadrdis -.Lno_default_mpu: -#elif XCHAL_HAVE_CACHEATTR || XCHAL_HAVE_MIMIC_CACHEATTR || XCHAL_HAVE_XLT_CACHEATTR \ - || (XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) - movi a2, _memmap_cacheattr_reset /* note: absolute symbol, not a ptr */ - cacheattr_set /* set CACHEATTR from a2 (clobbers a3-a8) */ -#endif - - /* Now that caches are initialized, cache coherency can be enabled. */ -#if XCHAL_DCACHE_IS_COHERENT -# if XCHAL_HAVE_EXTERN_REGS && XCHAL_HAVE_MX && (XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RE_2012_0) - /* Opt into coherence for MX (for backward compatibility / testing). */ - movi a3, 1 - movi a2, XER_CCON - wer a3, a2 -# endif -#endif - - /* Enable zero-overhead loop instr buffer and snoop responses if configured. */ - /* If HW erratum 453 fix is to be applied then disable loop instr buffer. */ -#if XCHAL_USE_MEMCTL && (XCHAL_SNOOP_LB_MEMCTL_DEFAULT || XCHAL_ERRATUM_453) - rsr.memctl a2 -#if XCHAL_SNOOP_LB_MEMCTL_DEFAULT - movi a3, XCHAL_SNOOP_LB_MEMCTL_DEFAULT - or a2, a2, a3 -#endif -#if XCHAL_ERRATUM_453 - srli a2, a2, 1 /* clear bit 0 (ZOL buffer enable) */ - slli a2, a2, 1 -#endif - wsr.memctl a2 -#endif - - /* Caches are all up and running, clear PWRCTL.ShutProcOffOnPWait. */ -#if XCHAL_HAVE_PSO_CDM - movi a2, XDM_MISC_PWRCTL - movi a4, ~PWRCTL_CORE_SHUTOFF - rer a3, a2 - and a3, a3, a4 - wer a3, a2 -#endif - -#endif /* !XCHAL_HAVE_HALT */ - - /* - * At this point we can unpack code and data (e.g. copy segments from - * ROM to RAM, vectors into their proper location, etc.). However, - * - * 1) the destination of the unpack may require some setup, - * for instance a DDR controller may need to be initialized - * and enabled before anything is unpacked into DDR. - * 2) users may wish to provide their own unpack code which works - * faster or in a different way than the default unpack code. - * - * To support such uses, we provide a user hook at this point. - * If the user hook function is defined, then it is called from - * here, and its return value (in a2) is checked. If the return - * value is non-zero, then we assume that code unpacking has been - * completed. The user hook function must be written in assembly - * and should make minimal assumptions about system state. 
- */ - - .weak __reset_user_init - - movi a2, __reset_user_init - beqz a2, 1f // no user hook - callx0 a2 // execute user hook - movi a0, 0 // ensure a0 continues to hold 0 - bnez a2, unpackdone // if a2 != 0 then unpack is done -1: - -#if defined(XTOS_UNPACK) - movi a2, _rom_store_table - beqz a2, unpackdone -unpack: l32i a3, a2, 0 // start vaddr - l32i a4, a2, 4 // end vaddr - l32i a5, a2, 8 // store vaddr - addi a2, a2, 12 - bgeu a3, a4, upnext // skip unless start < end -uploop: l32i a6, a5, 0 - addi a5, a5, 4 - s32i a6, a3, 0 - addi a3, a3, 4 - bltu a3, a4, uploop - j unpack -upnext: bnez a3, unpack - bnez a5, unpack -#endif /* XTOS_UNPACK */ - -unpackdone: - -#if defined(XTOS_UNPACK) || defined(XTOS_MP) - /* - * If writeback caches are configured and enabled, unpacked data must be - * written out to memory before trying to execute it: - */ - dcache_writeback_all a2, a3, a4, 0 - icache_sync a2 // ensure data written back is visible to i-fetch - /* - * Note: no need to invalidate the i-cache after the above, because we - * already invalidated it further above and did not execute anything within - * unpacked regions afterwards. [Strictly speaking, if an unpacked region - * follows this code very closely, it's possible for cache-ahead to have - * cached a bit of that unpacked region, so in the future we may need to - * invalidate the entire i-cache here again anyway.] - */ -#endif - - -#if !XCHAL_HAVE_HALT /* skip for TX */ - - /* - * Now that we know the .lit4 section is present (if got unpacked) - * (and if absolute literals are used), initialize LITBASE to use it. - */ -#if XCHAL_HAVE_ABSOLUTE_LITERALS && XSHAL_USE_ABSOLUTE_LITERALS - /* - * Switch from PC-relative to absolute (litbase-relative) L32R mode. - * Set LITBASE to 256 kB beyond the start of the literals in .lit4 - * (aligns to the nearest 4 kB boundary, LITBASE does not have bits 1..11) - * and set the enable bit (_lit4_start is assumed 4-byte aligned). - */ - movi a2, _lit4_start + 0x40001 - wsr.litbase a2 - rsync -#endif /* have and use absolute literals */ - .end no-absolute-literals // we can now start using absolute literals - - -// Technically, this only needs to be done pre-LX2, assuming hard reset: -# if XCHAL_HAVE_WINDOWED && defined(__XTENSA_WINDOWED_ABI__) - // Windowed register init, so we can call windowed code (eg. C code). - movi a1, 1 - wsr.windowstart a1 - // The processor always clears WINDOWBASE at reset, so no need to clear it here. - // It resets WINDOWSTART to 1 starting with LX2.0/X7.0 (RB-2006.0). - // However, assuming hard reset is not yet always practical, so do this anyway: - wsr.windowbase a0 - rsync - movi a0, 0 // possibly a different a0, clear it -# endif - -#if XCHAL_HW_MIN_VERSION < XTENSA_HWVERSION_RB_2006_0 /* only pre-LX2 needs this */ - // Coprocessor option initialization -# if XCHAL_HAVE_CP - //movi a2, XCHAL_CP_MASK // enable existing CPs - // To allow creating new coprocessors using TC that are not known - // at GUI build time without having to explicitly enable them, - // all CPENABLE bits must be set, even though they may not always - // correspond to a coprocessor. 
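Stepping back to the _rom_store_table unpack loop earlier in this hunk: it walks a table of (start, end, store) word triplets, copies each packed segment to its run address one 32-bit word at a time, and stops at an all-zero entry. A rough C equivalent under the same assumed triplet layout (sketch only; the real loop must run before any C runtime exists, which is why it is assembly):

#include <stdint.h>

struct rom_store_entry {
	uint32_t *start;        /* destination start vaddr */
	uint32_t *end;          /* destination end vaddr (exclusive) */
	const uint32_t *store;  /* packed copy of the segment */
};

static void unpack_rom(const struct rom_store_entry *e)
{
	/* Terminator: an entry whose start and store pointers are both zero. */
	for (; e->start || e->store; e++) {
		const uint32_t *src = e->store;

		/* Copy word by word; entries with start >= end copy nothing. */
		for (uint32_t *dst = e->start; dst < e->end; dst++)
			*dst = *src++;
	}
}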
- movi a2, 0xFF // enable *all* bits, to allow dynamic TIE - wsr.cpenable a2 -# endif - - // Floating point coprocessor option initialization (at least - // rounding mode, so that floating point ops give predictable results) -# if XCHAL_HAVE_FP && !XCHAL_HAVE_VECTORFPU2005 - rsync /* wait for WSR to CPENABLE to complete before accessing FP coproc state */ - wur.fcr a0 /* clear FCR (default rounding mode, round-nearest) */ - wur.fsr a0 /* clear FSR */ -# endif -#endif /* pre-LX2 */ - - - // Initialize memory error handler address. - // Putting this address in a register allows multiple instances of - // the same configured core (with separate program images but shared - // code memory, thus forcing memory error vector to be shared given - // it is not VECBASE relative) to have the same memory error vector, - // yet each have their own handler and associated data save area. -#if XCHAL_HAVE_MEM_ECC_PARITY_IGNORE - movi a4, _MemErrorHandler - wsr.mesave a4 -#endif - - - /* - * Initialize medium and high priority interrupt dispatchers: - */ -#if HAVE_XSR && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) - -#if !CONFIG_XT_BOOT_LOADER || CONFIG_VM_ROM -# ifndef XCHAL_DEBUGLEVEL /* debug option not selected? */ -# define XCHAL_DEBUGLEVEL 99 /* bogus value outside 2..6 */ -# endif - - .macro init_vector level - .if GREATERTHAN(XCHAL_NUM_INTLEVELS+1,\level) - .if XCHAL_DEBUGLEVEL-\level - .weak _Level&level&FromVector - movi a4, _Level&level&FromVector - writesr excsave \level a4 - .if GREATERTHAN(\level,XCHAL_EXCM_LEVEL) - movi a5, _Pri_&level&_HandlerAddress - s32i a4, a5, 0 - /* If user provides their own handler, that handler might - * not provide its own _Pri__HandlerAddress variable for - * linking handlers. In that case, the reference below - * would pull in the XTOS handler anyway, causing a conflict. - * To avoid that, provide a weak version of it here: - */ - .pushsection .data, "aw" - .global _Pri_&level&_HandlerAddress - .weak _Pri_&level&_HandlerAddress - .align 4 - _Pri_&level&_HandlerAddress: .space 4 - .popsection - .endif - .endif - .endif - .endm - - init_vector 2 - init_vector 3 - init_vector 4 - init_vector 5 - init_vector 6 -#endif -#endif /*HAVE_XSR*/ - - - /* - * Complete reset initialization outside the vector, - * to avoid requiring a vector that is larger than necessary. - * This 2nd-stage startup code sets up the C Run-Time (CRT) and calls main(). - * - * Here we use call0 not because we expect any return, but - * because the assembler/linker dynamically sizes call0 as - * needed (with -mlongcalls) which it doesn't with j or jx. - * Note: This needs to be call0 regardless of the selected ABI. 
- */ - -#if CONFIG_XT_BOOT_LOADER && !CONFIG_VM_ROM - movi a0, SOF_TEXT_BASE - callx0 a0 -#else - call0 _start // jump to _start (in crt1-*.S) -#endif - /* does not return */ - -#else /* XCHAL_HAVE_HALT */ - - j _start // jump to _start (in crt1-*.S) - // (TX has max 64kB IRAM, so J always in range) - - // Paranoia -- double-check requirements / assumptions of this Xtensa TX code: -# if !defined(__XTENSA_CALL0_ABI__) || !XCHAL_HAVE_FULL_RESET || XCHAL_HAVE_INTERRUPTS || XCHAL_HAVE_CCOUNT || XCHAL_DTLB_ARF_WAYS || XCHAL_HAVE_DEBUG || XCHAL_HAVE_S32C1I || XCHAL_HAVE_ABSOLUTE_LITERALS || XCHAL_DCACHE_SIZE || XCHAL_ICACHE_SIZE || XCHAL_HAVE_PIF || XCHAL_HAVE_WINDOWED -# error "Halt architecture (Xtensa TX) requires: call0 ABI, all flops reset, no exceptions or interrupts, no TLBs, no debug, no S32C1I, no LITBASE, no cache, no PIF, no windowed regs" -# endif - -#endif /* XCHAL_HAVE_HALT */ - - -#if (!XCHAL_HAVE_HALT || defined(XTOS_UNPACK)) && XCHAL_HAVE_IMEM_LOADSTORE - .size _ResetHandler, . - _ResetHandler -#else - .size _ResetVector, . - _ResetVector -#endif - - .text - .global xthals_hw_configid0, xthals_hw_configid1 - .global xthals_release_major, xthals_release_minor - .end literal_prefix diff --git a/src/arch/xtensa/xtos/shared-reset-vector.S b/src/arch/xtensa/xtos/shared-reset-vector.S deleted file mode 100644 index ae457f9507d3..000000000000 --- a/src/arch/xtensa/xtos/shared-reset-vector.S +++ /dev/null @@ -1,73 +0,0 @@ -// shared-reset-vector.S -- Sharable Reset Vector (requires PRID option) - -// Copyright (c) 1999-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - - // Populate this processor's entry in the reset table. - // The core instance specific LSP should put this section - // in the correct location within the table. - // - .section .ResetTable.rodata, "a" - .word _ResetHandler - - // This sharable reset code assumes RC-2009.0 or later hardware, - // to guarantee that no processor state initialization is required - // prior to doing loads etc. - // Total size is 28 bytes (or 27 with density option). - // By necessity, none of these bytes vary by core instance; - // the appropriate reset table entry is selected using PRID. 
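The sharable reset vector below reduces to a table lookup: the low bits of PRID select this core's entry in _ResetTable_base and control jumps there. A C-level sketch of the same dispatch, assuming a 4-bit core ID as in the assembly (illustrative only, not code from this tree):

#include <stdint.h>

typedef void (*core_entry_t)(void);

extern core_entry_t _ResetTable_base[];  /* one entry per core, filled by each LSP */

static void shared_reset_dispatch(uint32_t prid)
{
	uint32_t core = prid & 0xF;      /* core ID within the multiprocessor */

	_ResetTable_base[core]();        /* jump to core-specific initialization */
}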
- // -#if XCHAL_HAVE_PRID - .section .SharedResetVector.text, "ax" - .begin no-absolute-literals - - .align 4 - .global _SharedResetVector -_SharedResetVector: - j .LSharedResetHandler - - .align 4 - .literal_position - - // Use L32R if available -#if XCHAL_HAVE_L32R - .literal rtbase, _ResetTable_base -#endif - - .align 4 -.LSharedResetHandler: - rsr.prid a0 // get processor ID (16 bits) -#if XCHAL_HAVE_L32R - l32r a1, rtbase // force use of L32R -#else - movi a1, _ResetTable_base // No L32R, will likely become CONST16 -#endif - extui a0, a0, 0, 4 // ID of core within the multiprocessor (FIXME: need proper constant...) - addx4 a1, a0, a1 - l32i a1, a1, 0 - jx a1 // jump to core-specific initialization - - .size _SharedResetVector, . - _SharedResetVector - .end no-absolute-literals -#endif - diff --git a/src/arch/xtensa/xtos/stub.c b/src/arch/xtensa/xtos/stub.c deleted file mode 100644 index 8a6dc8359f3d..000000000000 --- a/src/arch/xtensa/xtos/stub.c +++ /dev/null @@ -1,106 +0,0 @@ -// SPDX-License-Identifier: BSD-3-Clause -// -// Those functions are stubs and implemented to ease linking libraries relying on -// certain operating system symbols to be present at link time. -// Those stub are not meant to be called at runtime and will panic if called. - -#include -#include -#include -#include -#include - -struct _reent; - -ssize_t _write_r(struct _reent *ptr, - int fd, - const void *buf, - size_t cnt); -ssize_t _write_r(struct _reent *ptr, - int fd, - const void *buf, - size_t cnt) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return 0; -} - -off_t _lseek_r(struct _reent *ptr, - int fd, - off_t pos, - int whence); -off_t _lseek_r(struct _reent *ptr, - int fd, - off_t pos, - int whence) -{ - off_t ret; - - sof_panic(SOF_IPC_PANIC_ARCH); - return ret; -} - -int _kill_r(struct _reent *ptr, - int pid, - int sig); -int _kill_r(struct _reent *ptr, - int pid, - int sig) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return 0; -} - -void *_sbrk_r(struct _reent *ptr, - ptrdiff_t incr); -void *_sbrk_r(struct _reent *ptr, - ptrdiff_t incr) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return NULL; -} - -void _exit(int __status); -void _exit(int __status) -{ - sof_panic(SOF_IPC_PANIC_ARCH); -} - - -ssize_t _read_r(struct _reent *ptr, - int fd, - void *buf, - size_t cnt); -ssize_t _read_r(struct _reent *ptr, - int fd, - void *buf, - size_t cnt) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return 0; -} - -int _close_r(struct _reent *ptr, int fd); -int _close_r(struct _reent *ptr, int fd) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return 0; -} - - -int _getpid_r(struct _reent *ptr); -int _getpid_r(struct _reent *ptr) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return 0; -} - -int _fstat_r(struct _reent *ptr, - int fd, struct stat *pstat); -int _fstat_r(struct _reent *ptr, - int fd, struct stat *pstat) -{ - sof_panic(SOF_IPC_PANIC_ARCH); - return -1; -} - diff --git a/src/arch/xtensa/xtos/textaddr b/src/arch/xtensa/xtos/textaddr deleted file mode 100755 index 4355facf1b4e..000000000000 --- a/src/arch/xtensa/xtos/textaddr +++ /dev/null @@ -1,59 +0,0 @@ -# Program to determine -Ttext parameter for ld -# $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/textaddr#1 $ - -# Copyright (c) 2001 Tensilica Inc. 
-# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -package textaddr; - -use strict; -use FileHandle; - -{ - $::myname = 'textaddr'; - - die("Usage is: $::myname objfile label address\n") - unless @ARGV == 3; - my($objfile, $label, $address) = @ARGV; - - - my $nm = new FileHandle "xt-nm $objfile|"; - die("$::myname: $!, opening pipe to xt-nm $objfile.\n") - unless $nm; - while (<$nm>) { - if (/^([0-9a-f]{8}) . (\w+)$/) { - my $oaddress = $1; - my $olabel = $2; - if ($olabel eq $label) { - printf ("0x%x\n", hex($address) - hex($oaddress)); - exit(0); - } - } - } - die ("$::myname: $label not found in $objfile.\n"); -} - -# -# Local Variables: -# mode:perl -# perl-indent-level:2 -# cperl-indent-level:2 -# End: diff --git a/src/arch/xtensa/xtos/tiny-refs-min.S b/src/arch/xtensa/xtos/tiny-refs-min.S deleted file mode 100644 index 453001cb7294..000000000000 --- a/src/arch/xtensa/xtos/tiny-refs-min.S +++ /dev/null @@ -1,28 +0,0 @@ -// tiny-refs-min.S - References to pull-in selected modules into tiny LSPs - -// Copyright (c) 2006 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - - .global _need_user_vector_ - .set _need_user_vector_, 0 // define this, so if referenced... - .global _UserExceptionVector // ... 
we pull-in this - diff --git a/src/arch/xtensa/xtos/tiny-refs.S b/src/arch/xtensa/xtos/tiny-refs.S deleted file mode 100644 index d89407e0eed9..000000000000 --- a/src/arch/xtensa/xtos/tiny-refs.S +++ /dev/null @@ -1,33 +0,0 @@ -// tiny-refs.S - References to pull-in selected modules into tiny LSPs - -// Copyright (c) 2006 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#if defined(__SPLIT__level1int) - - .global _need_level1int_ - .set _need_level1int_, 0 // define this, so if referenced... - .global _need_user_vector_ // ... we pull-in this - .global _xtos_l1int_handler // and this - -#endif - - diff --git a/src/arch/xtensa/xtos/user-vector-min.S b/src/arch/xtensa/xtos/user-vector-min.S deleted file mode 100644 index d8f19ea5c1cd..000000000000 --- a/src/arch/xtensa/xtos/user-vector-min.S +++ /dev/null @@ -1,110 +0,0 @@ -// user-vector-min.S - Minimal User Vector for General Exceptions -// Takes less table space, but does not allow registering new handlers. -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/user-vector-min.S#1 $ - -// Copyright (c) 2003-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
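Both user-exception vectors removed below share one idea: dispatch on EXCCAUSE through a word table of handler addresses, with every cause that has no dedicated handler routed to a common unhandled-exception stub. In C terms the pattern looks roughly like this (handler names hypothetical; the minimal variant additionally folds causes above 5 into entry 0):

#include <stdint.h>

#define EXCCAUSE_NUM 64

typedef void (*exc_handler_t)(void);

static void unhandled_exception(void) { for (;;) ; }   /* spin / break to debugger */
static void syscall_handler(void)     { /* handle SYSCALL */ }
static void level1_int_handler(void)  { /* handle level-1 interrupt */ }

/* Causes without a dedicated handler stay NULL and fall back below. */
static exc_handler_t exc_handler_table[EXCCAUSE_NUM] = {
	[1] = syscall_handler,          /* SYSCALL instruction */
	[4] = level1_int_handler,       /* level-1 interrupt */
};

static void dispatch_exception(uint32_t exccause)
{
	exc_handler_t h = exc_handler_table[exccause % EXCCAUSE_NUM];

	(h ? h : unhandled_exception)();
}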
- -#include -#include -#include "xtos-internal.h" - -#if XCHAL_HAVE_EXCEPTIONS && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) - - // Vector code - .begin literal_prefix .UserExceptionVector - .section .UserExceptionVector.text, "ax" - .align 4 - .global _UserExceptionVector -_UserExceptionVector: -# if (((XSHAL_USER_VECTOR_SIZE >= 28) && XCHAL_HAVE_ADDX && XCHAL_HAVE_DENSITY && XCHAL_HAVE_L32R) || (XSHAL_USER_VECTOR_SIZE >= 36) || XSHAL_VECTORS_PACKED) && !defined(XSHAL_ERRATUM_487_FIX) - - addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. - s32i a2, a1, UEXC_a2 - s32i a3, a1, UEXC_a3 - rsr.exccause a2 // get exception cause - movi a3, _xtos_min_handler_table - bgeui a2, 6, 1f // causes 6 and above map to zero - addx4 a3, a2, a3 // index by cause if 1 .. 5 -1: l32i a3, a3, 0 - s32i a4, a1, UEXC_a4 - jx a3 // jump to cause-specific handler - - .size _UserExceptionVector, . - _UserExceptionVector - .end literal_prefix - -# else /*vector as small as 12 bytes:*/ - - addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. - s32i a2, a1, UEXC_a2 - movi a2, _UserExceptionFromVector // load user exception handler address - //interlock - jx a2 // jump to handler - - .size _UserExceptionVector, . - _UserExceptionVector - .end literal_prefix - - // Dispatch outside vector: - .text - .align 4 - .global _UserExceptionFromVector -_UserExceptionFromVector: - hw_erratum_487_fix - rsr.exccause a2 // get exception cause - s32i a3, a1, UEXC_a3 - movi a3, _xtos_min_handler_table - bgeui a2, 6, 1f // causes 6 and above map to zero - addx4 a3, a2, a3 // index by cause if 1 .. 5 -1: l32i a3, a3, 0 - s32i a4, a1, UEXC_a4 - jx a3 // jump to cause-specific handler - .size _UserExceptionFromVector, . - _UserExceptionFromVector - -# endif - - - /* - * Read-only minimal table of assembly-level exception handlers - * for user vectored exceptions. - * Only provides entries for SYSCALL, MOVSP, and level-1 interrupt causes. - */ - .section .rodata, "a" - .global _xtos_min_handler_table - .align 4 -_xtos_min_handler_table: - .word xtos_unhandled_exception // 0 Illegal Instruction, and causes > 5 - .word _xtos_syscall_handler // 1 SYSCALL Instruction - .word xtos_unhandled_exception // 2 Instruction Fetch Error - .word xtos_unhandled_exception // 3 Load/Store Error -# if XCHAL_HAVE_INTERRUPTS - .word _xtos_l1int_handler // 4 Level-1 Interrupt -# else - .word xtos_unhandled_exception // 4 Level-1 Interrupt (not configured) -# endif -# if XCHAL_HAVE_WINDOWED && !defined(__XTENSA_CALL0_ABI__) - .word _xtos_alloca_handler // 5 Alloca (MOVSP Instruction) -# else - .word xtos_unhandled_exception // 5 Alloca (MOVSP Instruction) (not configured) -# endif - .text - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/user-vector.S b/src/arch/xtensa/xtos/user-vector.S deleted file mode 100644 index 0f17d8300486..000000000000 --- a/src/arch/xtensa/xtos/user-vector.S +++ /dev/null @@ -1,200 +0,0 @@ -// user-vector.S - User Vector for General Exceptions -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/user-vector.S#1 $ - -// Copyright (c) 1998-2015 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include -#include "xtos-internal.h" - -#if XCHAL_HAVE_EXCEPTIONS && (XCHAL_HAVE_XEA1 || XCHAL_HAVE_XEA2) - - // Vector code - .section .UserExceptionVector.text, "ax" - .align 4 - .global _UserExceptionVector -_UserExceptionVector: -# if (((XSHAL_USER_VECTOR_SIZE >= 28) && XCHAL_HAVE_ADDX && XCHAL_HAVE_DENSITY && XCHAL_HAVE_L32R) || (XSHAL_USER_VECTOR_SIZE >= 36) || XSHAL_VECTORS_PACKED) && !defined (XSHAL_ERRATUM_487_FIX) - // There is space to dispatch right at the vector: - - addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. - s32i a2, a1, UEXC_a2 - s32i a3, a1, UEXC_a3 - movi a3, xtos_exc_handler_table - rsr.exccause a2 // get exception cause - //interlock - addx4 a3, a2, a3 - l32i a3, a3, 0 - s32i a4, a1, UEXC_a4 - jx a3 // jump to cause-specific handler - - .size _UserExceptionVector, . - _UserExceptionVector - -# else - // The vector may be as small as 12 bytes: - - addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. - s32i a2, a1, UEXC_a2 - movi a2, _UserExceptionFromVector // load user exception handler address - //interlock - jx a2 // jump to handler - - .size _UserExceptionVector, . - _UserExceptionVector - - // Dispatch outside vector: - .text - //.subsection 2 - .align 4 - .global _UserExceptionFromVector -_UserExceptionFromVector: - hw_erratum_487_fix - s32i a3, a1, UEXC_a3 - movi a3, xtos_exc_handler_table - rsr.exccause a2 // get exception cause - s32i a4, a1, UEXC_a4 - addx4 a3, a2, a3 - l32i a3, a3, 0 - jx a3 // jump to cause-specific handler - - .size _UserExceptionFromVector, . - _UserExceptionFromVector - -# endif - - - .weak xtos_cause3_handler - - /* - * Table of assembly-level general-exception handlers - * (quickly entered) for user vectored exceptions. - * Provides entries for all possible 64 exception causes - * currently allowed for in the EXCCAUSE register. - * - * NOTE: entries that have a corresponding C handler - * (registered at run-time) point to xtos_c_wrapper_handler; - * entries that have no handler point to xtos_unhandled_exception. 
- */ - .data -#if CONFIG_MULTICORE - .global xtos_exc_handler_table_r -#else - .global xtos_exc_handler_table -#endif - .align 4 -#if CONFIG_MULTICORE -xtos_exc_handler_table_r: -#else -xtos_exc_handler_table: -#endif - .word xtos_unhandled_exception // 0 IllegalInstruction - .word _xtos_syscall_handler // 1 Syscall - .word xtos_unhandled_exception // 2 InstructionFetchError - .word xtos_unhandled_exception // 3 LoadStoreError -# if XCHAL_HAVE_INTERRUPTS - .word _xtos_l1int_handler // 4 Level1Interrupt -# else - .word xtos_unhandled_exception // 4 Level1Interrupt (not configured) -# endif -# if XCHAL_HAVE_WINDOWED && !defined(__XTENSA_CALL0_ABI__) - .word _xtos_alloca_handler // 5 Alloca (MOVSP) -# else - .word xtos_unhandled_exception // 5 Alloca (MOVSP) (not configured) -# endif - .word xtos_unhandled_exception // 6 IntegerDivideByZero - .word xtos_unhandled_exception // 7 Speculation - .word xtos_unhandled_exception // 8 Privileged - .word xtos_unhandled_exception // 9 Unaligned - .word xtos_unhandled_exception //10 (reserved for Tensilica) - .word xtos_unhandled_exception //11 (reserved for Tensilica) - .word xtos_cause3_handler //12 PIF data error on fetch - .word xtos_cause3_handler //13 PIF data error on ld/st - .word xtos_cause3_handler //14 PIF address error on fetch - .word xtos_cause3_handler //15 PIF address error on ld/st - .word xtos_unhandled_exception //16 InstTLBMiss - .word xtos_unhandled_exception //17 InstTLBMultiHit - .word xtos_unhandled_exception //18 InstFetchPrivilege - .word xtos_unhandled_exception //19 (reserved for Tensilica) - .word xtos_unhandled_exception //20 InstFetchProhibited - .word xtos_unhandled_exception //21 (reserved for Tensilica) - .word xtos_unhandled_exception //22 (reserved for Tensilica) - .word xtos_unhandled_exception //23 (reserved for Tensilica) - .word xtos_unhandled_exception //24 LoadStoreTLBMiss - .word xtos_unhandled_exception //25 LoadStoreTLBMultiHit - .word xtos_unhandled_exception //26 LoadStorePrivilege - .word xtos_unhandled_exception //27 (reserved for Tensilica) - .word xtos_unhandled_exception //28 LoadProhibited - .word xtos_unhandled_exception //29 StoreProhibited - .word xtos_unhandled_exception //30 (reserved for Tensilica) - .word xtos_unhandled_exception //31 (reserved for Tensilica) - .rept 8 - .word xtos_unhandled_exception //32-39 CoprocessorDisabled (n = 0..7) - .endr - - .rept XCHAL_EXCCAUSE_NUM-40 - .word xtos_unhandled_exception //40-63 (reserved for TIE) - .endr -#if CONFIG_MULTICORE - .section .bss - .global xtos_exc_handler_table - .align 4 -xtos_exc_handler_table: - .space XCHAL_EXCCAUSE_NUM*4 -#endif - .text - - - // NOTES: - // - // Here are alternative vectors. They will NOT work with - // the handlers currently provided with XTOS. However they - // might be useful to someone writing their own handlers - // from scratch. Note that XSR is only available on T1040 - // and later hardware. 
- // -//*** The typical tiny 9-byte vector: *** -// wsr.excsave1 a3 // save user a3 -// movi a3, _UserExceptionFromVector // load user exception handler address -// jx a3 -// -//*** Minimizing EXCCAUSE-dispatch delay, not assuming valid SP: *** -// wsr.depc a0 // save a0 (double exceptions fatal here, so not expected) -// rsr.exccause a0 -// xsr.excsave1 a1 // EXCSAVE_1 always contains &exception_handlers[0] -// //interlock -// addx4 a0, a0, a1 -// l32i a0, a0, TABLE_OFS + EXC_CODE_KERNEL*4 -// xsr.excsave1 a1 // restore a1 (DEPC contains original a0) -// jx a0 // jump to cause-specific handler -// -//*** Doing EXCCAUSE-dispatch with table in EXCSAVE_1: *** -// addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. -// s32i a2, a1, UEXC_a2 -// rsr.exccause a2 -// xsr.excsave1 a4 // EXCSAVE_1 always contains &exception_handlers[0] -// s32i a3, a1, UEXC_a3 -// addx4 a2, a2, a4 -// l32i a2, a2, TABLE_OFS + EXC_CODE_KERNEL*4 -// xsr.excsave1 a4 // restore a1 (DEPC contains original a0) -// jx a2 // jump to cause-specific handler - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/xea1/exc-alloca-handler.S b/src/arch/xtensa/xtos/xea1/exc-alloca-handler.S deleted file mode 100644 index c3b63fb0b377..000000000000 --- a/src/arch/xtensa/xtos/xea1/exc-alloca-handler.S +++ /dev/null @@ -1,275 +0,0 @@ -// exc-alloca-handler.S - OBSOLETE - ALLOCA cause exception assembly-level handler - -#if 0 /* This handler is OBSOLETE - now part of window-vectors.S */ - -// Copyright (c) 2002-2010 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -/* - * Code written to the windowed ABI must use the MOVSP instruction to modify - * the stack pointer (except for startup code, which doesn't have a caller). - * The compiler uses MOVSP to allocate very large or variable size stack frames. - * MOVSP guarantees that the caller frame's a0-a3 registers, stored below the - * stack pointer, are moved atomically with respect to interrupts and exceptions - * to satisfy windowed ABI requirements. When user code executes the MOVSP - * instruction and the caller frame is on the stack rather than in the register - * file, the processor takes an ALLOCA exception. The ALLOCA exception handler - * moves the caller frame's a0-a3 registers to follow the stack pointer. - * This file implements this ALLOCA exception handler. - * - * Code written in C can generate a MOVSP in four situations: - * - * 1. 
By calling "alloca": - * - * void foo(int array_size) { - * char * bar = alloca(array_size); - * ... - * - * 2. By using variable sized arrays (a GNU C extension): - * - * void foo(int array_size) { - * char bar[array_size]; - * ... - * - * 3. By using nested C functions (also a GNU C extension): - * - * void afunction(void) { - * ... - * int anotherfunction(void) { - * } - * ... - * - * 4. By using very large amounts of stack space in a single function. The exact - * limit is 32,760 bytes (including 16-48 bytes of caller frame overhead). - * Typically, users don't encounter this limit unless they have functions - * that locally declare large arrays, for example: - * - * void foo(void) { - * int an_array[8192]; // 32,768 bytes - * int another_array[100]; // 400 bytes - * ... - * - * - * NOTE: This handler only works when MOVSP's destination register is the stack - * pointer "a1" (synonym with "sp"), i.e. "MOVSP a1, ". This is the only - * meaningful form of MOVSP in the windowed ABI, and the only form generated - * by the compiler and used in assembly. The code below does not check the - * destination register, so other forms of MOVSP cause unexpected behaviour. - */ - -#include -#include "xtos-internal.h" - -#define ERROR_CHECKING 1 // define as 0 to save a few bytes - - -#if XCHAL_HAVE_EXCEPTIONS - -//Vector: -// addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. -// s32i a2, a1, UEXC_a2 -// s32i a3, a1, UEXC_a3 -// movi a3, xtos_exc_handler_table -// rsr.exccause a2 -// addx4 a2, a2, a3 -// l32i a2, a2, 0 -// s32i a4, a1, UEXC_a4 -// jx a2 // jump to cause-specific handler - - .global _need_user_vector_ // pull-in real user vector (tiny LSP) - - .text - .align 4 - .global _xtos_alloca_handler -_xtos_alloca_handler: -#if !XCHAL_HAVE_WINDOWED || defined(__XTENSA_CALL0_ABI__) - rfe_rfue -#else /* we have windows w/o call0 abi */ - // HERE: a2, a3, a4 have been saved to - // exception stack frame allocated with a1 (sp). - // a2 contains EXCCAUSE. - // (12 cycles from vector to here, assuming cache hits, 5-stage pipe, etc) - - /* - * Skip the MOVSP instruction so we don't execute it again on return: - */ - - rsr.epc1 a3 // load instruction address (PC) - s32i a5, a1, UEXC_a5 // save a5 - addi a2, a3, 3 // increment PC to skip MOVSP instruction -#if XCHAL_HAVE_LOOPS - /* - * If the MOVSP instruction is the last instruction in the body of - * a zero-overhead loop that must be executed again, then decrement - * the loop count and resume execution at the head of the loop. - */ - rsr.lend a4 - rsr.lcount a5 - bne a4, a2, 1f // done unless next-PC matches LEND - beqz a5, 1f // if LCOUNT zero, not in loop - addi a5, a5, -1 // z.o. loopback! decrement LCOUNT... - wsr.lcount a5 - rsr.lbeg a2 // PC back to start of loop -#endif /*XCHAL_HAVE_LOOPS*/ -1: wsr.epc1 a2 // update return PC past MOVSP - - /* - * Figure out what register MOVSP is moving from ('s' field, 2nd byte). - * If MOVSP is in an instruction RAM or ROM, we can only access it with - * 32-bit loads. So use shifts to read the byte from a 32-bit load. - */ - - addi a3, a3, 1 // advance to byte containing 's' field - extui a2, a3, 0, 2 // get bits 0 and 1 of address of this byte - sub a3, a3, a2 // put address on 32-bit boundary - l32i a3, a3, 0 // get word containing byte (can't use l8ui on IRAM/IROM) - rsr.sar a4 // save SAR - // NOTE: possible addition here: verify destination register is indeed a1. 
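For readers less familiar with the windowed ABI, the cases listed in the comment above can be condensed into a short C sketch. This is illustrative only and not part of the deleted file; the function names are made up. Building any of these for the windowed ABI makes the compiler emit MOVSP, which lands in the ALLOCA handler whenever the caller's frame has already been spilled to the stack:

#include <alloca.h>
#include <string.h>

/* Illustrative only: each function makes the compiler adjust the stack
 * pointer with MOVSP and so may take the ALLOCA exception described above. */

void uses_alloca(int n)
{
        char *buf = alloca(n);          /* case 1: run-time sized allocation */
        memset(buf, 0, n);
}

void uses_vla(int n)
{
        char buf[n];                    /* case 2: variable-length array */
        memset(buf, 0, sizeof(buf));
}

void huge_frame(void)
{
        int an_array[8192];             /* case 4: 32,768 bytes, past the
                                         * ~32,760-byte single-frame limit */
        memset(an_array, 0, sizeof(an_array));
}
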
-# if XCHAL_HAVE_BE - ssa8b a2 - sll a3, a3 - extui a3, a3, 28, 4 // extract source register number -# else - ssa8l a2 - srl a3, a3 - extui a3, a3, 0, 4 // extract source register number -# endif - wsr.sar a4 // restore SAR - // (+?? cycles max above = ?? cycles, assuming cache hits, 5-stage pipe, no zoloops, etc) - - movi a4, .Ljmptable // jump table - mov a5, a1 // save the exception stack frame ptr in a5 - addi a1, a1, ESF_TOTALSIZE // restore a1 (in case of MOVSP a1,a1) - -# if XCHAL_HAVE_DENSITY - addx4 a4, a3, a4 // index by src reg number * 4 -# define ALIGN .align 4 // 4-byte jmptable entries -# define MOV _mov.n -# define L32I _l32i.n -# define DONE _bnez.n a4, .Lmove_save_area // a4 known non-zero -# else - addx8 a4, a3, a4 // index by src reg number * 8 -# define ALIGN .align 8 // 8-byte jmptable entries -# define MOV mov -# define L32I l32i -# define DONE j .Lmove_save_area -# endif - - jx a4 // jump into the following table - - ALIGN -.Ljmptable: MOV a1, a0 ; DONE // MOVSP a1, a0 - ALIGN ; DONE // MOVSP a1, a1 - ALIGN ; L32I a1, a5, UEXC_a2 ; DONE // MOVSP a1, a2 - ALIGN ; L32I a1, a5, UEXC_a3 ; DONE // MOVSP a1, a3 - ALIGN ; L32I a1, a5, UEXC_a4 ; DONE // MOVSP a1, a4 - ALIGN ; L32I a1, a5, UEXC_a5 ; DONE // MOVSP a1, a5 - ALIGN ; MOV a1, a6 ; DONE // MOVSP a1, a6 - ALIGN ; MOV a1, a7 ; DONE // MOVSP a1, a7 - ALIGN ; MOV a1, a8 ; DONE // MOVSP a1, a8 - ALIGN ; MOV a1, a9 ; DONE // MOVSP a1, a9 - ALIGN ; MOV a1, a10 ; DONE // MOVSP a1, a10 - ALIGN ; MOV a1, a11 ; DONE // MOVSP a1, a11 - ALIGN ; MOV a1, a12 ; DONE // MOVSP a1, a12 - ALIGN ; MOV a1, a13 ; DONE // MOVSP a1, a13 - ALIGN ; MOV a1, a14 ; DONE // MOVSP a1, a14 - ALIGN ; MOV a1, a15 // MOVSP a1, a15 - -.Lmove_save_area: - // Okay. a1 now contains the new SP value. - -# if ERROR_CHECKING - // Verify it is sensible: - extui a3, a1, 0, 2 // verify that new SP is 4-byte aligned - beqz a3, 1f // if so, skip fixup - -// .global _xtos_misaligned_movsp // make label visible for debugging -//_xtos_misaligned_movsp: -# if XCHAL_HAVE_DEBUG - break 1, 15 // break into debugger (if any) -# endif - sub a1, a1, a3 // FORCE alignment of the new pointer (!) -1: -# endif - -# if XCHAL_HAVE_XEA2 - addi a2, a5, ESF_TOTALSIZE // compute a2 = old SP -# else /*XEA1:*/ - addi a2, a5, ESF_TOTALSIZE-16 // compute a2 = old SP's save area -# endif - // Does new SP (in a1) overlap with exception stack frame (in a5)?: - movi a4, ESF_TOTALSIZE // size of exception stack frame - sub a3, a1, a5 // distance from ESF ptr to new SP - bgeu a3, a4, 1f // does new SP overlap ESF? branch if not - // Move ESF down so it doesn't overlap with the new register save area: - // (a1 = current ESF, a2 = new SP, a4 = ESF_TOTALSIZE) - sub a5, a5, a4 // shift down ESF (by ESF size) - l32i a3, a5, UEXC_a2+ESF_TOTALSIZE - l32i a4, a5, UEXC_a3+ESF_TOTALSIZE - s32i a3, a5, UEXC_a2 - s32i a4, a5, UEXC_a3 - l32i a3, a5, UEXC_a4+ESF_TOTALSIZE - l32i a4, a5, UEXC_a5+ESF_TOTALSIZE - s32i a3, a5, UEXC_a4 - s32i a4, a5, UEXC_a5 -1: - - // Move the register save area (from old SP to new SP): -# if XCHAL_HAVE_XEA2 - l32e a3, a2, -16 - l32e a4, a2, -12 - s32e a3, a1, -16 - s32e a4, a1, -12 - l32e a3, a2, -8 - l32e a4, a2, -4 - s32e a3, a1, -8 - s32e a4, a1, -4 -# else /*XEA1:*/ - addi a1, a1, -16 // point to new save area - l32i a3, a2, 0 - l32i a4, a2, 4 - s32i a3, a1, 0 - s32i a4, a1, 4 - l32i a3, a2, 8 - l32i a4, a2, 12 - s32i a3, a1, 8 - s32i a4, a1, 12 - addi a1, a1, 16 // back to correct new SP -# endif /*XEA1*/ - // (+?? cycles max above = ?? 
cycles, assuming cache hits, 5-stage pipe, etc) - - // Restore a2, a3, a4, a5, and return: - l32i a2, a5, UEXC_a2 - l32i a3, a5, UEXC_a3 - l32i a4, a5, UEXC_a4 - l32i a5, a5, UEXC_a5 - rfe_rfue - // (+?? cycles max above = ?? cycles, assuming cache hits, 5-stage pipe, etc) - - -#endif /* !XCHAL_HAVE_WINDOWED || __XTENSA_CALL0_ABI */ - - .size _xtos_alloca_handler, . - _xtos_alloca_handler - -#endif /* XCHAL_HAVE_EXCEPTIONS */ - -#endif /* 0 */ - diff --git a/src/arch/xtensa/xtos/xea1/exc-c-wrapper-handler.S b/src/arch/xtensa/xtos/xea1/exc-c-wrapper-handler.S deleted file mode 100644 index 9937e8f7208a..000000000000 --- a/src/arch/xtensa/xtos/xea1/exc-c-wrapper-handler.S +++ /dev/null @@ -1,374 +0,0 @@ -// xea1/exc-c-wrapper-handler.S - General Exception Handler that Dispatches C Handlers - -// Copyright (c) 2002-2016 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include -#include "../xtos-internal.h" -#ifdef SIMULATOR -#include -#endif - -#if XCHAL_HAVE_XEA1 && XCHAL_HAVE_EXCEPTIONS /* note - XEA1 always has exceptions */ - - -/* - * This assembly-level handler causes the associated exception (usually causes 12-15) - * to be handled as if it were exception cause 3 (load/store error exception). - * This provides forward-compatibility with a possible future split of the - * load/store error cause into multiple more specific causes. - */ - .align 4 - .global xtos_cause3_handler -xtos_cause3_handler: - movi a2, EXCCAUSE_LOAD_STORE_ERROR - j xtos_c_wrapper_handler - .size xtos_cause3_handler, . - xtos_cause3_handler - - -/* - * This is the general exception assembly-level handler that dispatches C handlers. - */ - .align 4 - .global xtos_c_wrapper_handler -xtos_c_wrapper_handler: -#ifdef __XTENSA_CALL0_ABI__ - // Redundantly de-allocate and re-allocate stack, so that GDB prologue - // analysis picks up the allocate part, and figures out how to traceback - // through the call stack through the exception. - addi a1, a1, ESF_TOTALSIZE // de-allocate stack frame (FIXME is it safe) -.global xtos_c_wrapper_dispatch -xtos_c_wrapper_dispatch: - // GDB starts analyzing prologue after most recent global symbol, so here: - addi a1, a1, -ESF_TOTALSIZE // re-allocate stack frame -#endif - - // HERE: a2, a3, a4 have been saved to exception stack frame allocated with a1 (sp). - // a2 contains EXCCAUSE. 
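Before the register-level detail that follows, it may help to have a rough C picture of what this wrapper ultimately does: save the interrupted context, fake a CALL4 frame, then index xtos_c_handler_table by EXCCAUSE and call the registered C handler with the exception frame pointer and the cause. The typedef and helper below are only a sketch of that dispatch; the names other than xtos_c_handler_table are made up and the real table element type may differ:

typedef void (*xtos_c_handler_fn)(void *exc_frame, int exccause);

/* One entry per EXCCAUSE value; a NULL entry means "no C handler". */
extern xtos_c_handler_fn xtos_c_handler_table[];

static void dispatch_c_handler(void *exc_frame, int exccause)
{
        xtos_c_handler_fn handler = xtos_c_handler_table[exccause];

        if (handler)            /* "null handler => skip call", as in the asm */
                handler(exc_frame, exccause);
}
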
- s32i a5, a1, UEXC_a5 // a5 will get clobbered by ENTRY after the pseudo-CALL4 - // (a4..a15 spilled as needed; save if modified) - - //NOTA: Possible future improvement: - // keep interrupts disabled until we get into the handler, such that - // we don't have to save other critical state such as EXCVADDR here. - //rsr.excvaddr a3 - s32i a2, a1, UEXC_exccause - //s32i a3, a1, UEXC_excvaddr - -#if XCHAL_HAVE_INTERRUPTS - rsilft a3, 1, XTOS_LOCKLEVEL // lockout - rsr.intenable a2 - //movi a3, ~XCHAL_EXCM_MASK - movi a3, ~XTOS_LOCKOUT_MASK // mask out low and medium priority levels, and high priority levels covered by - // XTOS_LOCKLEVEL if any, so we can run at PS.INTLEVEL=0 while manipulating INTENABLE - s32i a2, a1, UEXC_sar // (temporary holding place for INTENABLE value to restore after pseudo-CALL4 below) - and a3, a2, a3 // mask out selected interrupts - wsr.intenable a3 // disable all interrupts up to and including XTOS_LOCKLEVEL -#endif - movi a3, PS_WOECALL4_ABI|PS_UM // WOE=0|1, UM=1, INTLEVEL=0, CALLINC=0|1 (call4 emul), OWB=(dontcare)=0 - - // NOTE: could use XSR here if targeting T1040 or T1050 hardware (requiring slight sequence adjustment as for XEA2): - rsr.ps a2 - rsync //NOT-ISA-DEFINED // wait for WSR to INTENABLE to complete before clearing PS.INTLEVEL - wsr.ps a3 // PS.INTLEVEL=0, effective INTLEVEL (via INTENABLE) is XTOS_LOCKLEVEL - - // HERE: window overflows enabled, but NOT SAFE because we're not quite - // in a valid windowed context (haven't restored a1 yet...); - // so don't cause any (keep to a0..a3) until we've saved critical state and restored a1: - - // NOTE: MUST SAVE EPC1 before causing any overflows, because overflows corrupt EPC1. - rsr.epc1 a3 - s32i a2, a1, UEXC_ps - s32i a3, a1, UEXC_pc - - -#ifdef __XTENSA_CALL0_ABI__ - - s32i a0, a1, UEXC_a0 // save the rest of the registers - s32i a6, a1, UEXC_a6 - s32i a7, a1, UEXC_a7 - s32i a8, a1, UEXC_a8 - s32i a9, a1, UEXC_a9 - s32i a10, a1, UEXC_a10 - s32i a11, a1, UEXC_a11 - s32i a12, a1, UEXC_a12 - s32i a13, a1, UEXC_a13 - s32i a14, a1, UEXC_a14 - s32i a15, a1, UEXC_a15 -# if XTOS_DEBUG_PC - // TODO: setup return PC for call traceback through interrupt dispatch -# endif - - rsync // wait for WSR to PS to complete - -#else /* ! __XTENSA_CALL0_ABI__ */ - -# if XTOS_CNEST - l32i a2, a1, ESF_TOTALSIZE-20 // save nested-C-func call-chain ptr -# endif - addi a1, a1, ESF_TOTALSIZE // restore sp (dealloc ESF) for sane stack again - rsync // wait for WSR to PS to complete - - /* HERE: we can SAFELY get window overflows. - * - * From here, registers a4..a15 automatically get spilled if needed. - * They become a0..a11 after the ENTRY instruction. - * Currently, we don't check whether or not these registers - * get spilled, so we must save and restore any that we - * modify. We've already saved a4 and a5 - * which we modify as part of the pseudo-CALL. - * - * IMPLEMENTATION NOTE: - * - * The pseudo-CALL below effectively saves registers a2..a3 so - * that they are available again after the corresponding - * RETW when returning from the exception handling. We - * could choose to put something like EPC1 or PS in - * there, so they're available more quickly when - * restoring. HOWEVER, exception handlers may wish to - * change such values, or anything on the exception stack - * frame, and expect these to be restored as modified. - * - * NOTA: future: figure out what's the best thing to put - * in a2 and a3. (candidate: a4 and a5 below; but what - * if exception handler manipulates ARs, as in a syscall - * handler.... 
oh well) - * - * - * Now do the pseudo-CALL. - * Make it look as if the code that got the exception made a - * CALL4 to the exception handling code. (We call - * this the "pseudo-CALL".) - * - * This pseudo-CALL is important and done this way: - * - * 1. There are only three ways to safely update the stack pointer - * in the windowed ABI, such that window exceptions work correctly: - * (a) spill all live windows to stack then switch to a new stack - * (or, save the entire address register file and window - * registers, which is likely even more expensive) - * (b) use MOVSP (or equivalent) - * (c) use ENTRY/RETW - * Doing (a) is excessively expensive, and doing (b) here requires - * copying 16 bytes back and forth which is also time-consuming; - * whereas (c) is very efficient, so that's what we do here. - * - * 2. Normally we cannot do a pseudo-CALL8 or CALL12 here. - * According to the - * windowed ABI, a function must allocate enough space - * for the largest call that it makes. However, the - * pseudo-CALL is executed in the context of the - * function that happened to be executing at the time - * the interrupt was taken, and that function might or - * might not have allocated enough stack space for a - * CALL8 or a CALL12. If we try doing a pseudo-CALL8 - * or -CALL12 here, we corrupt the stack if the - * interrupted function happened to not have allocated - * space for such a call. - * - * 3. We set the return PC, but it's not strictly - * necessary for proper operation. It does make - * debugging, ie. stack tracebacks, much nicer if it - * can point to the interrupted code (not always - * possible, eg. if interrupted code is in a different - * GB than the interrupt handling code, which is - * unlikely in a system without protection where - * interrupt handlers and general application code are - * typically linked together). - * - * IMPORTANT: Interrupts must stay disabled while doing the pseudo-CALL, - * or at least until after the ENTRY instruction, because SP has been - * restored to its original value that does not reflect the exception - * stack frame's allocation. An interrupt taken here would - * corrupt the exception stack frame (ie. allocate another over it). - * (High priority interrupts can remain enabled, they save and restore - * all of their state and use their own stack or save area.) - * For the same reason, we mustn't get any exceptions in this code - * (other than window exceptions where noted) until ENTRY is done. - */ - - // HERE: may get a single window overflow (caused by the following instruction). - -# if XTOS_DEBUG_PC - movi a4, 0xC0000000 // [for debug] for return PC computation below - or a3, a4, a3 // [for debug] set upper two bits of return PC - addx2 a4, a4, a3 // [for debug] clear upper bit -# else - movi a4, 0 // entry cannot cause overflow, cause it here -# endif - - .global _GeneralException -_GeneralException: // this label makes tracebacks through exceptions look nicer - - _entry a1, ESF_TOTALSIZE // as if after a CALL4 (PS.CALLINC set to 1 above) - - /* - * The above ENTRY instruction does a number of things: - * - * 1. Because we're emulating CALL4, the ENTRY rotates windows - * forward by 4 registers (as per 'ROTW +1'), so that - * a4-a15 became a0-a11. So now: a0-a11 are part of - * the interrupted context to be preserved. a0-a1 - * were already saved above when they were a4-a5. - * a12-a15 are free to use as they're NOT part of the - * interrupted context. We don't need to save/restore - * them, and they will get spilled if needed. - * - * 2. 
Updates SP (new a1), allocating the exception stack - * frame in the new window, preserving the old a1 in - * the previous window. - * - * 3. The underscore prefix prevents the assembler from - * automatically aligning the ENTRY instruction on a - * 4-byte boundary, which could create a fatal gap in - * the instruction stream. - * - * At this point, ie. before we re-enable interrupts, we know the caller is - * always live so we can safely modify a1 without using MOVSP (we can use MOVSP - * but it will never cause an ALLOCA or underflow exception here). - * So this is a good point to modify the stack pointer if we want eg. to - * switch to an interrupt stack (if we do, we need to save the current SP - * because certain things have been saved to that exception stack frame). - * We couldn't do this easily before ENTRY, where the caller wasn't - * necessarily live. - * - * NOTE: We don't switch to an interrupt stack here, because exceptions - * are generally caused by executing code -- so we handle exceptions in - * the context of the thread that cause them, and thus remain on the same - * stack. This means a thread's stack must be large enough to handle - * the maximum level of nesting of exceptions that the thread can cause. - */ - - // NOTA: exception handlers for certain causes may need interrupts to be kept - // disabled through their dispatch, so they can turn them off themselves at - // the right point (if at all), eg. to save critical state unknown to this - // code here, or for some recovery action that must be atomic with respect - // to interrupts.... - // - // Perhaps two versions of this assembly-level handler are needed, one that restores - // interrupts to what they were before the exception was taken (as here) - // and one that ensures at least low-priority interrupts are kept disabled? - // NOTA: For now, always enable interrupts here. - - /* - * Now we can enable interrupts. - * (Pseudo-CALL is complete, and SP reflects allocation of exception stack frame.) - */ - -#endif /* __XTENSA_CALL0_ABI__ */ - - -#if XCHAL_HAVE_INTERRUPTS - //... recompute and set INTENABLE ... - l32i a13, a1, UEXC_sar // (temporary holding place for INTENABLE value saved before pseudo-CALL4 above) - rsr.sar a12 - wsr.intenable a13 // restore INTENABLE as it was on entry -#else - rsr.sar a12 -#endif - - movi a13, xtos_c_handler_table // &table - l32i a15, a1, UEXC_exccause // arg2: exccause - - s32i a12, a1, UEXC_sar - save_loops_mac16 a1, a12, a14 // save LOOP & MAC16 regs, if configured - - addx4 a12, a15, a13 // a12 = table[exccause] - l32i a12, a12, 0 // ... -#ifdef __XTENSA_CALL0_ABI__ - mov a2, a1 // arg1: exception parameters - mov a3, a15 // arg2: exccause - beqz a12, 1f // null handler => skip call - callx0 a12 // call C exception handler for this exception -#else - mov a14, a1 // arg1: exception parameters - // mov a15, a15 // arg2: exccause, already in a15 - beqz a12, 1f // null handler => skip call - callx12 a12 // call C exception handler for this exception -#endif -1: - // Now exit the handler. - - - // Restore special registers - - restore_loops_mac16 a1, a13, a14, a15 // restore LOOP & MAC16 regs, if configured - l32i a14, a1, UEXC_sar - - /* - * Disable interrupts while returning from the pseudo-CALL setup above, - * for the same reason they were disabled while doing the pseudo-CALL: - * this sequence restores SP such that it doesn't reflect the allocation - * of the exception stack frame, which we still need to return from - * the exception. 
- */ - -#if XCHAL_HAVE_INTERRUPTS - // Must disable interrupts via INTENABLE, because PS.INTLEVEL gets zeroed - // by any window exception exit, eg. the window underflow that may happen - // upon executing the RETW instruction. - // Also, must disable at XTOS_LOCKLEVEL, not just EXCM_LEVEL, because this - // code effectively manipulates virtual INTENABLE state up to the point - // INTENABLE is written in _xtos_return_from_exc. - // - rsilft a12, 1, XTOS_LOCKLEVEL // lockout - rsr.intenable a12 - //movi a13, ~XCHAL_EXCM_MASK - movi a13, ~XTOS_LOCKOUT_MASK // mask out low and medium priority levels, and high priority levels covered by - // XTOS_LOCKLEVEL if any, so we can run at PS.INTLEVEL=0 while manipulating INTENABLE - s32i a12, a1, UEXC_sar // (temporary holding place for INTENABLE value to restore after pseudo-CALL4 below) - and a13, a12, a13 // mask out selected interrupts - wsr.intenable a13 // disable all interrupts up to and including XTOS_LOCKLEVEL -#endif - wsr.sar a14 - - movi a0, _xtos_return_from_exc -#ifdef __XTENSA_CALL0_ABI__ - jx a0 -#else /* ! __XTENSA_CALL0_ABI__ */ - /* Now return from the pseudo-CALL from the interrupted code, to rotate - * our windows back... */ - - movi a13, 0xC0000000 - //movi a13, 3 - //slli a13, a13, 30 -# if XCHAL_HAVE_INTERRUPTS - rsync //NOT-ISA-DEFINED // wait for WSR to INTENABLE to complete before doing RETW - // (ie. before underflow exception exit) - // (not needed, because underflow exception entry does implicit ISYNC ?? - // but in case underflow not taken, WSR must complete before wsr to PS that lowers PS.INTLEVEL - // possibly below XTOS_LOCKLEVEL, in which RETW's jump is not sufficient sync, so a sync - // is needed but it can be placed just before WSR to PS -- but here is fine) -# endif - or a0, a0, a13 // set upper two bits - addx2 a0, a13, a0 // clear upper bit - retw -#endif /* ! __XTENSA_CALL0_ABI__ */ - - /* FIXME: what about _GeneralException ? */ - - .size xtos_c_wrapper_handler, . - xtos_c_wrapper_handler - - -#endif /* XCHAL_HAVE_XEA1 && XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/xea1/exc-return.S b/src/arch/xtensa/xtos/xea1/exc-return.S deleted file mode 100644 index 25bfa32b71ab..000000000000 --- a/src/arch/xtensa/xtos/xea1/exc-return.S +++ /dev/null @@ -1,123 +0,0 @@ -// xea1/exc-return.S - Shared exception/interrupt return code - -// Copyright (c) 2002-2016 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -#include -#include -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA1 && XCHAL_HAVE_EXCEPTIONS /* XEA1 always has exceptions */ - - .text - .align 4 - .global _xtos_return_from_exc -_xtos_return_from_exc: - -#ifdef __XTENSA_CALL0_ABI__ - - l32i a0, a1, UEXC_a0 // restore general registers, pc, ps - l32i a4, a1, UEXC_a4 - l32i a5, a1, UEXC_a5 - l32i a6, a1, UEXC_a6 - l32i a7, a1, UEXC_a7 - l32i a8, a1, UEXC_a8 - l32i a9, a1, UEXC_a9 - l32i a10, a1, UEXC_a10 - l32i a11, a1, UEXC_a11 - l32i a12, a1, UEXC_a12 - l32i a13, a1, UEXC_a13 - l32i a14, a1, UEXC_a14 - l32i a15, a1, UEXC_a15 - - l32i a2, a1, UEXC_pc - l32i a3, a1, UEXC_ps - wsr.epc1 a2 - wsr.ps a3 - - l32i a2, a1, UEXC_a2 - l32i a3, a1, UEXC_a3 - - rsync // wait for WSR to PS to complete - - addi a1, a1, ESF_TOTALSIZE // restore sp - - rfe - -#else /* ! __XTENSA_CALL0_ABI__ */ - - - // Here we rotated back by N registers, to the interrupted code's register window. - // NOTA: a2 and a3 might contain something useful, but we haven't determined - // what that might be yet (for now, a2 contains nested-C-func call-chain ptr). - - // NOTE: a5 still contains the exception window's exception stack frame pointer. - -# if XTOS_CNEST - s32i a2, a5, ESF_TOTALSIZE-20 // restore nested-C-func call-chain ptr -# endif - l32i a2, a5, UEXC_ps - l32i a3, a5, UEXC_pc - wsr.ps a2 // this sets INTLEVEL to 1; ... - -# if XCHAL_HAVE_INTERRUPTS - l32i a4, a5, UEXC_sar // load INTENABLE value that restores original vpri - l32i a2, a5, UEXC_a2 - rsync // wait for WSR to PS to complete - wsr.intenable a4 // update INTENABLE to restore original vpri (PS.INTLEVEL=1 here) - l32i a4, a5, UEXC_a4 -# else - l32i a2, a5, UEXC_a2 - l32i a4, a5, UEXC_a4 - rsync // wait for WSR to PS to complete -# endif - - /* FIXME: Enabling this here may break task-engine builds - * because task engines have exceptions (sort of), but they do - * not have the EPC_1 special register. XCHAL_HAVE_INTERRUPTS - * is incorrect for normal configs without interrupts but with - * exceptions (we still need to restore EPC_1). The correct - * solution is to define XCHAL_HAVE_EXCEPTIONS more strictly - * to mean something like "Have exceptions with - * user/kernel/double vectors" so that task engines are - * excluded. This would be a change to - * . */ - - wsr.epc1 a3 - // HERE: - // - we cannot get window overflows anymore -- we're NOT in a valid windowed context - // - low-priority interrupts are still disabled - - // NOTE: we don't restore EXCCAUSE or EXCVADDR, not needed. - - // Restore a3, a5: - l32i a3, a5, UEXC_a3 - l32i a5, a5, UEXC_a5 - - rfe_rfue - -#endif /* __XTENSA_CALL0_ABI__ */ - - .size _xtos_return_from_exc, . - _xtos_return_from_exc - -#endif /* XCHAL_HAVE_XEA1 && XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/xea1/int-lowpri-dispatcher.S b/src/arch/xtensa/xtos/xea1/int-lowpri-dispatcher.S deleted file mode 100644 index 500a2036e3bb..000000000000 --- a/src/arch/xtensa/xtos/xea1/int-lowpri-dispatcher.S +++ /dev/null @@ -1,324 +0,0 @@ -// XEA1 - Level-one interrupt dispatcher (user vectored handler) - -// Copyright (c) 1999-2016 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA1 && XCHAL_HAVE_EXCEPTIONS && XCHAL_HAVE_INTERRUPTS - -#define _INTERRUPT_LEVEL 1 - - - // NOTE: something equivalent to the following vector is executed - // before entering this handler (see user-vector.S). -//_UserExceptionVector: -// addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. -// s32i a2, a1, UEXC_a2 -// s32i a3, a1, UEXC_a3 -// movi a3, xtos_exc_handler_table -// rsr.exccause a2 -// addx4 a2, a2, a3 -// l32i a2, a2, 0 -// s32i a4, a1, UEXC_a4 -// jx a2 // jump to cause-specific handler - - .global _need_user_vector_ // pull-in real user vector (tiny LSP) - - .text - .align 4 - .global _xtos_l1int_handler -_xtos_l1int_handler: - // HERE: a2, a3, a4 have been saved to exception stack frame allocated with a1 (sp). - - s32i a5, a1, UEXC_a5 // a5 will get clobbered by ENTRY after pseudo-CALL4 - // (a4..a15 spilled as needed; save if modified) - -#if HAVE_XSR - movi a2, PS_WOECALL4_ABI|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) - xsr.ps a2 - s32i a2, a1, UEXC_ps -#else - rsr.ps a2 - s32i a2, a1, UEXC_ps - movi a2, PS_WOECALL4_ABI|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) - wsr.ps a2 -#endif - rsync - - /* store pc */ - rsr.epc1 a2 - s32i a2, a1, UEXC_pc - - /* store rest of the registers */ - s32i a0, a1, UEXC_a0 - s32i a6, a1, UEXC_a6 - s32i a7, a1, UEXC_a7 - s32i a8, a1, UEXC_a8 - s32i a9, a1, UEXC_a9 - s32i a10, a1, UEXC_a10 - s32i a11, a1, UEXC_a11 - s32i a12, a1, UEXC_a12 - s32i a13, a1, UEXC_a13 - s32i a14, a1, UEXC_a14 - s32i a15, a1, UEXC_a15 - - /* store current sp */ - xtos_addr_percore a2, xtos_saved_sp - s32i a1, a2, 0 - - /* store current task sp */ - xtos_task_ctx_percore a2 - beqz a2, no_context - s32i a1, a2, TC_stack_pointer - -no_context: -# if XTOS_CNEST - l32i a2, a1, ESF_TOTALSIZE-20 // save nested-C-func call-chain ptr -# endif - addi a1, a1, ESF_TOTALSIZE -# if XTOS_DEBUG_PC - rsr.epc1 a4 // [for debug] get return PC - movi a5, 0xC0000000 // [for debug] setup call size... - or a4, a5, a4 // [for debug] set upper two bits of return PC - addx2 a4, a5, a4 // [for debug] clear upper bit -# else - movi a4, 0 /* terminate stack frames, overflow check */ -# endif - _entry a1, ESF_TOTALSIZE - -/* Reset the interrupt level to xtos locklevel (lvl 6 on most systems) */ - - rsil a15, XTOS_LOCKLEVEL - -/* Get bit list of pending interrupts at the current interrupt priority level. 
- * If bit list is empty, interrupt is spurious (can happen if a - * genuine interrupt brings control this direction, but the interrupt - * goes away before we read the INTERRUPT register). Also save off - * sar, loops, mac16 registers and coprocessors. */ - -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp0 - xchal_cp0_store a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp1 - xchal_cp1_store a11, a12, a13, a14, a15 -#endif -#endif - rsr.interrupt a15 - rsr.intenable a12 - movi a13, XCHAL_INTLEVEL1_MASK - and a15, a15, a12 - and a15, a15, a13 - rsr.sar a14 - s32i a14, a1, UEXC_sar - save_loops_mac16 a1, a13, a14 - - /* switch to interrupt stack */ - xtos_int_stack_addr_percore a13, _INTERRUPT_LEVEL, xtos_stack_for_interrupt - s32i a1, a13, 0 - addi a1, a13, SOF_STACK_SIZE - - _beqz a15, LABEL(spurious,int) - - /* set stack base and size for interrupt context */ - xtos_addr_percore a11, xtos_interrupt_ctx - s32i a13, a11, TC_stack_base - movi a13, SOF_STACK_SIZE - s32i a13, a11, TC_stack_size - - /* save task context */ - xtos_task_ctx_percore a13 - xtos_store_percore a13, a14, xtos_saved_ctx - - /* set interrupt task context */ - xtos_task_ctx_store_percore a11, a14 - - xtos_on_wakeup - -/* Loop to handle all pending interrupts. */ - -LABEL(.L1,_loop0): - neg a12, a15 - and a12, a12, a15 - wsr.intclear a12 // clear if edge-trig or s/w or wr/err (else no effect) -#if CONFIG_MULTICORE - xtos_addr_percore a13, xtos_interrupt_table -#else - movi a13, xtos_interrupt_table -#endif - find_ms_setbit a15, a12, a14, 0 - mapint a15 - addx8 a12, a15, a13 - l32i a13, a12, XIE_HANDLER - l32i a14, a12, XIE_ARG - mov a15, a1 - callx12 a13 - - rsr.interrupt a15 - rsr.intenable a12 - movi a13, XCHAL_INTLEVEL1_MASK - and a15, a15, a12 - and a15, a15, a13 - _bnez a15, LABEL(.L1,_loop0) - -/* Restore everything, and return. */ - - /* restore task context if needed */ - xtos_task_ctx_percore a11 - xtos_addr_percore a12, xtos_interrupt_ctx - bne a11, a12, restore_cp - xtos_addr_percore a12, xtos_saved_ctx - xtos_task_ctx_store_percore a12, a11 - -restore_cp: -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - xtos_task_ctx_percore a11 - beqz a11, no_context_2 - l32i a11, a11, TC_stack_pointer - addi a11, a11, UEXC_cp0 - xchal_cp0_load a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - xtos_task_ctx_percore a11 - beqz a11, no_context_2 - l32i a11, a11, TC_stack_pointer - addi a11, a11, UEXC_cp1 - xchal_cp1_load a11, a12, a13, a14, a15 -#endif -#endif - -no_context_2: - restore_loops_mac16 a1, a13, a14, a15 - l32i a14, a1, UEXC_sar -LABEL(spurious,int): - -#if XCHAL_HAVE_EXCLUSIVE - // Clear exclusive monitors. - clrex -#endif - - movi a0, LABEL(return,from_exc) - movi a13, 0xC0000000 - wsr.sar a14 - or a0, a0, a13 - addx2 a0, a13, a0 -# if _INTERRUPT_LEVEL < XCHAL_EXCM_LEVEL -/* Raise the interrupt mask before - * returning to avoid a race condition where we deallocate the - * exception stack frame but still have more register values to - * restore from it. 
*/ - rsil a14, XCHAL_EXCM_LEVEL -# endif - retw -LABEL(return,from_exc): - /* a5 contains interrupt stack pointer */ - addi a5, a5, -SOF_STACK_SIZE - l32i a5, a5, 0 - -# if XTOS_CNEST - s32i a2, a5, ESF_TOTALSIZE-20 // restore nested-C-func call-chain ptr -# endif - - /* store sp after returning from handler */ - s32i a1, a5, UEXC_a1 - -restore: - /* load registers for window spill */ - l32i a4, a5, UEXC_a4 - l32i a6, a5, UEXC_a6 - l32i a7, a5, UEXC_a7 - l32i a8, a5, UEXC_a8 - l32i a9, a5, UEXC_a9 - l32i a10, a5, UEXC_a10 - l32i a11, a5, UEXC_a11 - l32i a12, a5, UEXC_a12 - l32i a13, a5, UEXC_a13 - l32i a14, a5, UEXC_a14 - - /* check if switch is needed */ - xtos_addr_percore a2, xtos_saved_sp - xtos_task_ctx_percore a1 - beqz a1, noSwitch - l32i a1, a1, TC_stack_pointer - l32i a0, a2, 0 - beq a0, a1, noSwitch - -doSwitch: - /* store new task sp */ - s32i a1, a2, 0 - - /* restore sp of task being preempted */ - l32i a1, a5, UEXC_a1 - - /* spill register windows to the stack */ - rsr.ps a2 - movi a3, PS_WOE_MASK - xor a2, a2, a3 - wsr.ps a2 - - call0 xthal_window_spill_nw - - /* restore previous ps */ - rsr.ps a2 - movi a3, PS_WOE_MASK - or a2, a2, a3 - wsr.ps a2 - - /* change stack */ - xtos_addr_percore a5, xtos_saved_sp - l32i a5, a5, 0 - j restore - -noSwitch: - /* restore ps and pc */ - l32i a0, a5, UEXC_ps - wsr.ps a0 - rsync - l32i a0, a5, UEXC_pc - wsr.epc1 a0 - - /* restore sar, loops and mac16 registers */ - l32i a0, a5, UEXC_sar - wsr.sar a0 - restore_loops_mac16 a5, a0, a1, a2 - - /* restore rest of the registers */ - l32i a0, a5, UEXC_a0 - l32i a1, a5, UEXC_a1 - l32i a2, a5, UEXC_a2 - l32i a3, a5, UEXC_a3 - l32i a15, a5, UEXC_a15 - l32i a5, a5, UEXC_a5 - rfe - - /* FIXME: what about _LevelOneInterrupt ? */ - .size _xtos_l1int_handler, . - _xtos_l1int_handler - -#endif /* XCHAL_HAVE_XEA1 && XCHAL_HAVE_EXCEPTIONS && XCHAL_HAVE_INTERRUPTS */ diff --git a/src/arch/xtensa/xtos/xea1/intlevel-restore.S b/src/arch/xtensa/xtos/xea1/intlevel-restore.S deleted file mode 100644 index 40b91dc1e6be..000000000000 --- a/src/arch/xtensa/xtos/xea1/intlevel-restore.S +++ /dev/null @@ -1,75 +0,0 @@ -// xea1/intlevel-restore.S - Interrupt related assembler code - _xtos_restore_intlevel - -// Copyright (c) 2004-2016 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - -#include -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA1 - -/*************************************************************************** - * void _xtos_restore_intlevel(unsigned restoreval); - * - * _xtos_restore_intlevel() restores the current interrupt level - * according to a value returned by _xtos_set_intlevel() or - * _xtos_set_min_intlevel() (or one of the corresponding macros). - * - * NOTE: In XEA1, this function is implemented identically - * to _xtos_set_vpri(). - */ - -/*************************************************************************** - * _xtos_set_vpri() is used to set the current virtual priority from C code; - * it can be called from the application or from a C interrupt handler. - */ - - .text - .global _xtos_restore_intlevel - .type _xtos_restore_intlevel,@function - .global _xtos_set_vpri - .type _xtos_set_vpri,@function - .align 4 -_xtos_set_vpri: -_xtos_restore_intlevel: - abi_entry -#if XCHAL_HAVE_INTERRUPTS && XTOS_VIRTUAL_INTENABLE - mov a3, a2 -#if CONFIG_MULTICORE - xtos_addr_percore a4, xtos_intstruct -#else - movi a4, _xtos_intstruct -#endif - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - l32i a2, a4, XTOS_VPRI_ENABLED_OFS // return old xtos_vpri_enabled (current vpri) - l32i a5, a4, XTOS_ENABLED_OFS // a3 = xtos_enabled - s32i a3, a4, XTOS_VPRI_ENABLED_OFS // set new xtos_vpri_enabled (current vpri) - and a5, a5, a3 // a5 = xtos_enabled & xtos_vpri_enabled - wsr.intenable a5 - xtos_unlock a7 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - .size _xtos_set_vpri, . - _xtos_set_vpri - -#endif /* XEA1 */ - diff --git a/src/arch/xtensa/xtos/xea1/intlevel-set.S b/src/arch/xtensa/xtos/xea1/intlevel-set.S deleted file mode 100644 index b06736e7c2ed..000000000000 --- a/src/arch/xtensa/xtos/xea1/intlevel-set.S +++ /dev/null @@ -1,76 +0,0 @@ -// xea1/intlevel-set.S - Interrupt related assembler code - _xtos_set_intlevel - -// Copyright (c) 2004-2016 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA1 - - -/*************************************************************************** - * unsigned _xtos_set_intlevel(int intlevel); - * - * _xtos_set_intlevel() is used to set the current priority from C code; - * it can be called from the application or from a C interrupt handler. 
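The _xtos_set_intlevel() / _xtos_restore_intlevel() pair documented above (plus _xtos_set_min_intlevel() in the next file) is typically used as a save/restore bracket around a short critical section under the XEA1 virtual-INTENABLE scheme. The fragment below only illustrates that calling pattern; the prototypes are taken from the comments in these files, while update_shared_state() is a made-up example:

unsigned _xtos_set_intlevel(int intlevel);
unsigned _xtos_set_min_intlevel(int intlevel);
void _xtos_restore_intlevel(unsigned restoreval);

void update_shared_state(void)
{
        /* Mask interrupts at levels 1..3 for a short critical section. */
        unsigned old_vpri = _xtos_set_intlevel(3);

        /* ... touch data shared with low/medium-priority handlers ... */

        _xtos_restore_intlevel(old_vpri);   /* back to the previous vpri */
}

On XEA1 none of this writes PS.INTLEVEL directly: as the surrounding code shows, the routines recompute INTENABLE from xtos_enabled & xtos_vpri_enabled inside an xtos_lock/xtos_unlock critical section.
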
- * - * NOTE: This version allows the 'intlevel' parameter to be computed - * at run-time, and thus is longer. It is much more efficient, and - * highly recommented, to use the XTOS_SET_INTLEVEL(intlevel) macro instead - * (which requires a constant intlevel). - */ - - .text - .align 4 - .global _xtos_set_intlevel - .type _xtos_set_intlevel,@function -_xtos_set_intlevel: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - /* In XEA1, we have to rely on INTENABLE register virtualization: */ - movi a4, Xthal_intlevel_andbelow_mask - extui a3, a2, 0, 4 // keep only INTLEVEL bits of parameter - addx4 a5, a3, a4 // index mask to use - l32i a3, a5, 0 // get mask of interrupts at requested intlevel and below - movi a5, -1 // all 1's -#if CONFIG_MULTICORE - xtos_addr_percore a4, xtos_intstruct -#else - movi a4, _xtos_intstruct -#endif - xor a3, a3, a5 // mask of interrupts at intlevels above the requested one (to enable) - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - l32i a2, a4, XTOS_VPRI_ENABLED_OFS // return old xtos_vpri_enabled (current vpri) - l32i a5, a4, XTOS_ENABLED_OFS // a5 = xtos_enabled - s32i a3, a4, XTOS_VPRI_ENABLED_OFS // set new xtos_vpri_enabled (current vpri) - and a5, a5, a3 // a5 = xtos_enabled & xtos_vpri_enabled - wsr.intenable a5 - xtos_unlock a7 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - - .size _xtos_set_intlevel, . - _xtos_set_intlevel - -#endif /* XEA1 */ - diff --git a/src/arch/xtensa/xtos/xea1/intlevel-setmin.S b/src/arch/xtensa/xtos/xea1/intlevel-setmin.S deleted file mode 100644 index 051415d626ce..000000000000 --- a/src/arch/xtensa/xtos/xea1/intlevel-setmin.S +++ /dev/null @@ -1,79 +0,0 @@ -// xea1/intlevel-setmin.S - Interrupt related assembler code - _xtos_set_min_intlevel - -// Copyright (c) 2004-2016 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA1 - - -/*************************************************************************** - * unsigned _xtos_set_min_intlevel(int intlevel); - * - * _xtos_set_min_intlevel() is identical to _xtos_set_intlevel() except - * that it will not lower the current interrupt level. Instead, - * it ensures that the current interrupt level is at least as high - * as specified. - * - * NOTE: This version allows the 'intlevel' parameter to be computed - * at run-time, and thus is longer. 
It is much more efficient, and - * highly recommented, to use the XTOS_SET_MIN_INTLEVEL(intlevel) macro instead - * (which requires a constant intlevel). - */ - - .text - .align 4 - .global _xtos_set_min_intlevel - .type _xtos_set_min_intlevel,@function -_xtos_set_min_intlevel: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - /* In XEA1, we have to rely on INTENABLE register virtualization: */ - movi a4, Xthal_intlevel_andbelow_mask - extui a3, a2, 0, 4 // keep only INTLEVEL bits of parameter - addx4 a5, a3, a4 // index mask to use - l32i a3, a5, 0 // get mask of interrupts at requested intlevel and below - movi a5, -1 // all 1's -#if CONFIG_MULTICORE - xtos_addr_percore a4, xtos_intstruct -#else - movi a4, _xtos_intstruct -#endif - xor a3, a3, a5 // mask of interrupts at intlevels above the requested one (to enable) - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - l32i a2, a4, XTOS_VPRI_ENABLED_OFS // return old xtos_vpri_enabled (current vpri) - l32i a5, a4, XTOS_ENABLED_OFS // a5 = xtos_enabled - and a3, a3, a2 // make sure we don't enable any new interrupts - s32i a3, a4, XTOS_VPRI_ENABLED_OFS // set new xtos_vpri_enabled (current vpri) - and a5, a5, a3 // a5 = xtos_enabled & xtos_vpri_enabled - wsr.intenable a5 - xtos_unlock a7 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - - .size _xtos_set_min_intlevel, . - _xtos_set_min_intlevel - -#endif /* XEA1 */ - diff --git a/src/arch/xtensa/xtos/xea1/window-vectors.S b/src/arch/xtensa/xtos/xea1/window-vectors.S deleted file mode 100644 index 1670ed988429..000000000000 --- a/src/arch/xtensa/xtos/xea1/window-vectors.S +++ /dev/null @@ -1,355 +0,0 @@ -// window-vectors-xea1.S - Register Window Overflow/Underflow Handlers for XEA1 -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/xea1/window-vectors.S#1 $ - -// Copyright (c) 1999-2013 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include - -#if XCHAL_HAVE_XEA1 -#if XCHAL_HAVE_WINDOWED && !defined(__XTENSA_CALL0_ABI__) - -# ifndef NO_SECTION_DIRECTIVES -// Exports -.global _WindowOverflow4 -.global _WindowUnderflow4 -.global _WindowOverflow8 -.global _WindowUnderflow8 -.global _WindowOverflow12 -.global _WindowUnderflow12 -.global _xtos_alloca_handler - - // Note: the current window exception vectors do not generate any - // literals. Hence the literal_prefix directive is not necessary. 
- // Specifying it "just in case" creates an empty section (named - // ".WindowVectors.literal") which can in some cases cause linking - // problems (the linker scripts don't place it anywhere). - // So leave it commented out: - // - //.begin literal_prefix .WindowVectors - - .section .WindowVectors.text, "ax" -# endif - - -// -// GENERAL NOTES: -// -// These window exception handlers need not be modified. -// They are specific to the windowed call ABI only. -// -// Underflow Handlers: -// -// The underflow handler for returning from call[i+1] to call[i] -// must preserve all the registers from call[i+1]'s window. -// In particular, a0 and a1 must be preserved because the RETW instruction -// will be reexecuted (and may even underflow again if an intervening -// exception has flushed call[i]'s registers). -// Registers a2 and up may contain return values. -// -// The caller could also potentially assume that the callee's a0 and a1 -// (its own a4&a5 if call4, a8&a9 if call8, a12&a13 if call12) -// are correct for whatever reason (not a clean thing to do in general, -// but if it's possible, unless the ABI explicitly prohibits it, -// it will eventually be done :) -- whether the the ABI needs to -// prohibit this is a different question). -// -// Timing of Handlers: -// -// Here is an overview of the overhead of taking a window exception, -// ie. the number of additional cycles taken relative to case where -// an exception is not taken. -// NOTE: these numbers do not take into account any cache misses, -// write buffer stalls, or other external stalls, if they occur. -// The totals consist of 5 cycles to enter the handler (or 6 or 7 -// for optional longer pipelines in Xtensa LX), the number of instructions -// and interlocks (2nd and 3rd columns below), and 2 cycles jump delay -// on return (3 cycles for optional longer I-side pipeline in Xtensa LX): -// -// Instruction+bubbles Totals (5-stage) -// XEA1 XEA2 XEA1 XEA2 -// Overflow-4 7 5 14 12 -// Overflow-8 14 10 21 17 -// Overflow-12 18 14 25 21 -// Underflow-4 6 5 13 12 -// Underflow-8 14 10 21 17 -// Underflow-12 18 14 25 21 -// -// Underflow-8 15 12 25 22 (7-stage; could be made 1 less) -// Underflow-12 19 16 29 26 (7-stage; could be made 1 less) - -#ifndef WINDOW_BASE_VECOFS -#define WINDOW_BASE_VECOFS XCHAL_WINDOW_OF4_VECOFS -#endif - - -// 4-Register Window Overflow Vector (Handler) -// -// Invoked if a call[i] referenced a register (a4-a15) -// that contains data from ancestor call[j]; -// call[j] had done a call4 to call[j+1]. -// On entry here: -// window rotated to call[j] start point; -// a0-a3 are registers to be saved; -// a4-a15 must be preserved; -// a5 is call[j+1]'s stack pointer. - - .org XCHAL_WINDOW_OF4_VECOFS - WINDOW_BASE_VECOFS -_WindowOverflow4: - addi a5, a5, -16 // to make store offsets positive - s32i a0, a5, 0 // save a0 to call[j+1]'s stack frame - s32i a1, a5, 4 // save a1 to call[j+1]'s stack frame - s32i a2, a5, 8 // save a2 to call[j+1]'s stack frame - s32i a3, a5, 12 // save a3 to call[j+1]'s stack frame - addi a5, a5, 16 // restore a5 - rfwo // rotates back to call[i] position - - .size _WindowOverflow4, . - _WindowOverflow4 - - -// ALLOCA exception handler -// -// NOTE: The alloca exception handler is squeezed in between the window exception -// handlers in order to save space, and also to allow short-range jumps to the -// window underflow handlers (see below for why). Because of the limited space in -// between the window handlers, this function is split into two to fit. 
-// -// Code written to the windowed ABI must use the MOVSP instruction to modify -// the stack pointer (except for startup code, which doesn't have a caller). -// The compiler uses MOVSP to allocate very large or variable size stack frames. -// MOVSP guarantees that the caller frame's a0-a3 registers, stored below the -// stack pointer, are moved atomically with respect to interrupts and exceptions -// to satisfy windowed ABI requirements. When user code executes the MOVSP -// instruction and the caller frame is on the stack rather than in the register -// file, the processor takes an ALLOCA exception. -// -// The XTOS user exception dispatcher allocates an exception frame on the -// stack and saves a2-a4 into that frame before calling us. So we need to -// restore those registers and deallocate the stack frame before jumping -// to the window underflow handler - which will restore the spilled registers -// back into the register file. -// The fact the alloca exception was taken means the registers associated with -// the base-save area have been spilled and will be restored by the underflow -// handler, so those 4 registers are available for scratch. - - .align 4 - -_xtos_alloca_handler: - - l32i a2, a1, UEXC_a2 // restore a2-a4 and deallocate frame - l32i a3, a1, UEXC_a3 - l32i a4, a1, UEXC_a4 - addi a1, a1, ESF_TOTALSIZE - wsr.excsave1 a0 // save a0 - rsr.windowbase a0 // grab WINDOWBASE before rotw changes it - rotw -1 // WINDOWBASE goes to a4, new a0-a3 are scratch - rsr.ps a2 - extui a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS - xor a3, a3, a4 // bits changed from old to current windowbase - j _xtos_alloca_2 // not enough room here... - - .size _xtos_alloca_handler, . - _xtos_alloca_handler - - -// 4-Register Window Underflow Vector (Handler) -// -// Invoked by RETW returning from call[i+1] to call[i] -// where call[i]'s registers must be reloaded (not live in ARs); -// call[i] had done a call4 to call[i+1]. -// On entry here: -// window rotated to call[i] start point; -// a0-a3 are undefined, must be reloaded with call[i].reg[0..3]; -// a4-a15 must be preserved (they are call[i+1].reg[0..11]); -// a5 is call[i+1]'s stack pointer. - - .org XCHAL_WINDOW_UF4_VECOFS - WINDOW_BASE_VECOFS -_WindowUnderflow4: - addi a3, a5, -16 // to make load offsets positive - l32i a0, a3, 0 // restore a0 from call[i+1]'s stack frame - l32i a1, a3, 4 // restore a1 from call[i+1]'s stack frame - l32i a2, a3, 8 // restore a2 from call[i+1]'s stack frame - l32i a3, a3, 12 // restore a3 from call[i+1]'s stack frame - rfwu - - .size _WindowUnderflow4, . - _WindowUnderflow4 - - -// This is the second part of the alloca handler. - - .align 4 - -_xtos_alloca_2: - - rsr.excsave1 a4 // restore original a0 (now in a4) - slli a3, a3, XCHAL_PS_OWB_SHIFT - xor a2, a2, a3 // flip changed bits in old window base - wsr.ps a2 // update PS.OWB to new window base - rsync - _bbci.l a4, 31, _WindowUnderflow4 - rotw -1 // original a0 goes to a8 - _bbci.l a8, 30, _WindowUnderflow8 - rotw -1 - j _WindowUnderflow12 - - .size _xtos_alloca_2, . - _xtos_alloca_2 - -// 8-Register Window Overflow Vector (Handler) -// -// Invoked if a call[i] referenced a register (a4-a15) -// that contains data from ancestor call[j]; -// call[j] had done a call8 to call[j+1]. -// On entry here: -// window rotated to call[j] start point; -// a0-a7 are registers to be saved; -// a8-a15 must be preserved; -// a9 is call[j+1]'s stack pointer. 
- - .org XCHAL_WINDOW_OF8_VECOFS - WINDOW_BASE_VECOFS -_WindowOverflow8: - addi a9, a9, -16 // to make store offsets positive - s32i a0, a9, 0 // save a0 to call[j+1]'s stack frame - addi a0, a1, -16 // a0 <- call[j-1]'s sp - s32i a1, a9, 4 // save a1 to call[j+1]'s stack frame - l32i a0, a0, 4 // (used to find end of call[j]'s frame) - s32i a2, a9, 8 // save a2 to call[j+1]'s stack frame - s32i a3, a9, 12 // save a3 to call[j+1]'s stack frame - addi a9, a9, 16 // restore a9 - addi a0, a0, -32 // to make load offsets positive - s32i a4, a0, 0 // save a4 to call[j]'s stack frame - s32i a5, a0, 4 // save a5 to call[j]'s stack frame - s32i a6, a0, 8 // save a6 to call[j]'s stack frame - s32i a7, a0, 12 // save a7 to call[j]'s stack frame - rfwo // rotates back to call[i] position - - .size _WindowOverflow8, . - _WindowOverflow8 - - -// 8-Register Window Underflow Vector (Handler) -// -// Invoked by RETW returning from call[i+1] to call[i] -// where call[i]'s registers must be reloaded (not live in ARs); -// call[i] had done a call8 to call[i+1]. -// On entry here: -// window rotated to call[i] start point; -// a0-a7 are undefined, must be reloaded with call[i].reg[0..7]; -// a8-a15 must be preserved (they are call[i+1].reg[0..7]); -// a9 is call[i+1]'s stack pointer. - - .org XCHAL_WINDOW_UF8_VECOFS - WINDOW_BASE_VECOFS -_WindowUnderflow8: - addi a9, a9, -16 // to make load offsets positive - l32i a0, a9, 0 // restore a0 from call[i+1]'s stack frame - l32i a1, a9, 4 // restore a1 from call[i+1]'s stack frame - l32i a2, a9, 8 // restore a2 from call[i+1]'s stack frame - addi a7, a1, -16 // a7 <- call[i-1]'s sp - l32i a7, a7, 4 // (used to find end of call[i]'s frame) - l32i a3, a9, 12 // restore a3 from call[i+1]'s stack frame - addi a9, a9, 16 // restore a9 - addi a7, a7, -32 // to make load offsets positive - l32i a4, a7, 0 // restore a4 from call[i]'s stack frame - l32i a5, a7, 4 // restore a5 from call[i]'s stack frame - l32i a6, a7, 8 // restore a6 from call[i]'s stack frame - l32i a7, a7, 12 // restore a7 from call[i]'s stack frame - rfwu - - .size _WindowUnderflow8, . - _WindowUnderflow8 - - -// 12-Register Window Overflow Vector (Handler) -// -// Invoked if a call[i] referenced a register (a4-a15) -// that contains data from ancestor call[j]; -// call[j] had done a call12 to call[j+1]. -// On entry here: -// window rotated to call[j] start point; -// a0-a11 are registers to be saved; -// a12-a15 must be preserved; -// a13 is call[j+1]'s stack pointer. 
- - .org XCHAL_WINDOW_OF12_VECOFS - WINDOW_BASE_VECOFS -_WindowOverflow12: - addi a13, a13, -16 // to make store offsets positive - s32i a0, a13, 0 // save a0 to call[j+1]'s stack frame - addi a0, a1, -16 // a0 <- call[j-1]'s sp - s32i a1, a13, 4 // save a1 to call[j+1]'s stack frame - l32i a0, a0, 4 // (used to find end of call[j]'s frame) - s32i a2, a13, 8 // save a2 to call[j+1]'s stack frame - s32i a3, a13, 12 // save a3 to call[j+1]'s stack frame - addi a13, a13, 16 // restore a13 - addi a0, a0, -48 // to make load offsets positive - s32i a4, a0, 0 // save a4 to end of call[j]'s stack frame - s32i a5, a0, 4 // save a5 to end of call[j]'s stack frame - s32i a6, a0, 8 // save a6 to end of call[j]'s stack frame - s32i a7, a0, 12 // save a7 to end of call[j]'s stack frame - s32i a8, a0, 16 // save a8 to end of call[j]'s stack frame - s32i a9, a0, 20 // save a9 to end of call[j]'s stack frame - s32i a10, a0, 24 // save a10 to end of call[j]'s stack frame - s32i a11, a0, 28 // save a11 to end of call[j]'s stack frame - rfwo // rotates back to call[i] position - - .size _WindowOverflow12, . - _WindowOverflow12 - - -// 12-Register Window Underflow Vector (Handler) -// -// Invoked by RETW returning from call[i+1] to call[i] -// where call[i]'s registers must be reloaded (not live in ARs); -// call[i] had done a call12 to call[i+1]. -// On entry here: -// window rotated to call[i] start point; -// a0-a11 are undefined, must be reloaded with call[i].reg[0..11]; -// a12-a15 must be preserved (they are call[i+1].reg[0..3]); -// a13 is call[i+1]'s stack pointer. - - .org XCHAL_WINDOW_UF12_VECOFS - WINDOW_BASE_VECOFS -_WindowUnderflow12: - addi a13, a13, -16 // to make load offsets positive - l32i a0, a13, 0 // restore a0 from call[i+1]'s stack frame - l32i a1, a13, 4 // restore a1 from call[i+1]'s stack frame - l32i a2, a13, 8 // restore a2 from call[i+1]'s stack frame - addi a11, a1, -16 // a11 <- call[i-1]'s sp - l32i a11, a11, 4 // (used to find end of call[i]'s frame) - l32i a3, a13, 12 // restore a3 from call[i+1]'s stack frame - addi a13, a13, 16 // restore a13 - addi a11, a11, -48 // to make load offsets positive - l32i a4, a11, 0 // restore a4 from end of call[i]'s stack frame - l32i a5, a11, 4 // restore a5 from end of call[i]'s stack frame - l32i a6, a11, 8 // restore a6 from end of call[i]'s stack frame - l32i a7, a11, 12 // restore a7 from end of call[i]'s stack frame - l32i a8, a11, 16 // restore a8 from end of call[i]'s stack frame - l32i a9, a11, 20 // restore a9 from end of call[i]'s stack frame - l32i a10, a11, 24 // restore a10 from end of call[i]'s stack frame - l32i a11, a11, 28 // restore a11 from end of call[i]'s stack frame - rfwu - - .size _WindowUnderflow12, . - _WindowUnderflow12 - - -# ifndef NO_SECTION_DIRECTIVES - //.end literal_prefix - .text -# endif - - -#endif /* XCHAL_HAVE_WINDOWED && !defined(__XTENSA_CALL0_ABI__) */ -#endif /* XCHAL_HAVE_XEA1 */ - diff --git a/src/arch/xtensa/xtos/xea2/exc-c-wrapper-handler.S b/src/arch/xtensa/xtos/xea2/exc-c-wrapper-handler.S deleted file mode 100644 index 08beb65b6b3a..000000000000 --- a/src/arch/xtensa/xtos/xea2/exc-c-wrapper-handler.S +++ /dev/null @@ -1,361 +0,0 @@ -// exc-c-wrapper-handler.S - General Exception Handler that Dispatches C Handlers - -// Copyright (c) 2002-2017 Cadence Design Systems, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA2 && XCHAL_HAVE_EXCEPTIONS - - -/* - * This assembly-level handler causes the associated exception (usually causes 12-15) - * to be handled as if it were exception cause 3 (load/store error exception). - * This provides forward-compatibility with a possible future split of the - * load/store error cause into multiple more specific causes. - */ - .align 4 - .global xtos_cause3_handler -xtos_cause3_handler: - movi a2, EXCCAUSE_LOAD_STORE_ERROR - j xtos_c_wrapper_handler - .size xtos_cause3_handler, . - xtos_cause3_handler - - - .align 4 -.Lhi: addi a2, a2, -XCHAL_EXCM_LEVEL - add a2, a2, a3 - j .Lps - - -/* - * This is the general exception assembly-level handler that dispatches C handlers. - */ - .align 4 - .global xtos_c_wrapper_handler -xtos_c_wrapper_handler: -#ifdef __XTENSA_CALL0_ABI__ - // Redundantly de-allocate and re-allocate stack, so that GDB prologue - // analysis picks up the allocate part, and figures out how to traceback - // through the call stack through the exception. - addi a1, a1, ESF_TOTALSIZE // de-allocate stack frame (FIXME is it safe) -.global xtos_c_wrapper_dispatch -xtos_c_wrapper_dispatch: - // GDB starts analyzing prologue after most recent global symbol, so here: - addi a1, a1, -ESF_TOTALSIZE // re-allocate stack frame -#endif - - // HERE: a2, a3, a4 have been saved to exception stack frame allocated with a1 (sp). - // a2 contains EXCCAUSE. - s32i a5, a1, UEXC_a5 // a5 will get clobbered by ENTRY after the pseudo-CALL4 - // (a4..a15 spilled as needed; save if modified) - - //NOTA: Possible future improvement: - // keep interrupts disabled until we get into the handler, such that - // we don't have to save other critical state such as EXCVADDR here. - //rsr.excvaddr a3 - s32i a2, a1, UEXC_exccause - //s32i a3, a1, UEXC_excvaddr - - // Set PS fields: - // EXCM = 0 - // WOE = __XTENSA_CALL0_ABI__ ? 0 : 1 - // UM = 1 - // INTLEVEL = MIN(INTLEVEL,EXCM_LEVEL) - // CALLINC = __XTENSA_CALL0_ABI__ ? 0 : 1 - // OWB = 0 (really, a dont care if !__XTENSA_CALL0_ABI__) - - rsr.ps a3 - movi a2, PS_WOECALL4_ABI|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) // CALL4 emulation - s32i a3, a1, UEXC_ps - extui a3, a3, 0, 4 // extract PS.INTLEVEL - bgeui a3, XCHAL_EXCM_LEVEL+1, .Lhi // at PS.INTLEVEL > EXCM_LEVEL ? 
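Before execution continues at .Lps, the sequence above has computed the PS value used while dispatching. The following C model is an illustration only and assumes the PS_* and XCHAL_* macros provided by the core headers and the deleted xtos-internal.h; it is not code from the sources.

    /*
     * Illustrative model of the PS value built above: WOE/CALLINC and UM are
     * forced, and INTLEVEL becomes max(previous INTLEVEL, XCHAL_EXCM_LEVEL).
     * The .Lhi path adds back the difference when the old level was higher.
     */
    static unsigned int dispatch_ps(unsigned int old_ps)
    {
            unsigned int ps = PS_WOECALL4_ABI | PS_UM |
                              PS_INTLEVEL(XCHAL_EXCM_LEVEL);
            unsigned int old_level = old_ps & 0xF;  /* PS.INTLEVEL: bits 0..3 */

            if (old_level > XCHAL_EXCM_LEVEL)
                    ps += old_level - XCHAL_EXCM_LEVEL;
            return ps;
    }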
-.Lps: rsr.epc1 a3 - wsr.ps a2 - - // HERE: window overflows enabled, but NOT SAFE because we're not quite - // in a valid windowed context (haven't restored a1 yet...); - // so don't cause any (keep to a0..a3) until we've saved critical state and restored a1: - - // NOTE: MUST SAVE EPC1 before causing any overflows, because overflows corrupt EPC1. - s32i a3, a1, UEXC_pc - -#ifdef __XTENSA_CALL0_ABI__ - - s32i a0, a1, UEXC_a0 // save the rest of the registers - s32i a6, a1, UEXC_a6 - s32i a7, a1, UEXC_a7 - s32i a8, a1, UEXC_a8 - s32i a9, a1, UEXC_a9 - s32i a10, a1, UEXC_a10 - s32i a11, a1, UEXC_a11 - s32i a12, a1, UEXC_a12 - s32i a13, a1, UEXC_a13 - s32i a14, a1, UEXC_a14 - s32i a15, a1, UEXC_a15 -# if XTOS_DEBUG_PC - // TODO: setup return PC for call traceback through interrupt dispatch -# endif - - rsync // wait for WSR to PS to complete - -#else /* ! __XTENSA_CALL0_ABI__ */ - -# if XTOS_CNEST - l32i a2, a1, ESF_TOTALSIZE-20 // save nested-C-func call-chain ptr -# endif - addi a1, a1, ESF_TOTALSIZE // restore sp (dealloc ESF) for sane stack again - rsync // wait for WSR to PS to complete - - /* HERE: we can SAFELY get window overflows. - * - * From here, registers a4..a15 automatically get spilled if needed. - * They become a0..a11 after the ENTRY instruction. - * Currently, we don't check whether or not these registers - * get spilled, so we must save and restore any that we - * modify. We've already saved a4 and a5 - * which we modify as part of the pseudo-CALL. - * - * IMPLEMENTATION NOTE: - * - * The pseudo-CALL below effectively saves registers a2..a3 so - * that they are available again after the corresponding - * RETW when returning from the exception handling. We - * could choose to put something like EPC1 or PS in - * there, so they're available more quickly when - * restoring. HOWEVER, exception handlers may wish to - * change such values, or anything on the exception stack - * frame, and expect these to be restored as modified. - * - * NOTA: future: figure out what's the best thing to put - * in a2 and a3. (candidate: a4 and a5 below; but what - * if exception handler manipulates ARs, as in a syscall - * handler.... oh well) - * - * - * Now do the pseudo-CALL. - * Make it look as if the code that got the exception made a - * CALL4 to the exception handling code. (We call - * this the "pseudo-CALL".) - * - * This pseudo-CALL is important and done this way: - * - * 1. There are only three ways to safely update the stack pointer - * in the windowed ABI, such that window exceptions work correctly: - * (a) spill all live windows to stack then switch to a new stack - * (or, save the entire address register file and window - * registers, which is likely even more expensive) - * (b) use MOVSP (or equivalent) - * (c) use ENTRY/RETW - * Doing (a) is excessively expensive, and doing (b) here requires - * copying 16 bytes back and forth which is also time-consuming; - * whereas (c) is very efficient, so that's what we do here. - * - * 2. Normally we cannot do a pseudo-CALL8 or CALL12 here. - * According to the - * windowed ABI, a function must allocate enough space - * for the largest call that it makes. However, the - * pseudo-CALL is executed in the context of the - * function that happened to be executing at the time - * the interrupt was taken, and that function might or - * might not have allocated enough stack space for a - * CALL8 or a CALL12. 
If we try doing a pseudo-CALL8 - * or -CALL12 here, we corrupt the stack if the - * interrupted function happened to not have allocated - * space for such a call. - * - * 3. We set the return PC, but it's not strictly - * necessary for proper operation. It does make - * debugging, ie. stack tracebacks, much nicer if it - * can point to the interrupted code (not always - * possible, eg. if interrupted code is in a different - * GB than the interrupt handling code, which is - * unlikely in a system without protection where - * interrupt handlers and general application code are - * typically linked together). - * - * IMPORTANT: Interrupts must stay disabled while doing the pseudo-CALL, - * or at least until after the ENTRY instruction, because SP has been - * restored to its original value that does not reflect the exception - * stack frame's allocation. An interrupt taken here would - * corrupt the exception stack frame (ie. allocate another over it). - * (High priority interrupts can remain enabled, they save and restore - * all of their state and use their own stack or save area.) - * For the same reason, we mustn't get any exceptions in this code - * (other than window exceptions where noted) until ENTRY is done. - */ - - // HERE: may get a single window overflow (caused by the following instruction). - -# if XTOS_DEBUG_PC - movi a4, 0xC0000000 // [for debug] for return PC computation below - or a3, a4, a3 // [for debug] set upper two bits of return PC - addx2 a4, a4, a3 // [for debug] clear upper bit -# else - movi a4, 0 // entry cannot cause overflow, cause it here -# endif - - _entry a1, ESF_TOTALSIZE // as if after a CALL4 (PS.CALLINC set to 1 above) - - /* - * The above ENTRY instruction does a number of things: - * - * 1. Because we're emulating CALL4, the ENTRY rotates windows - * forward by 4 registers (as per 'ROTW +1'), so that - * a4-a15 became a0-a11. So now: a0-a11 are part of - * the interrupted context to be preserved. a0-a1 - * were already saved above when they were a4-a5. - * a12-a15 are free to use as they're NOT part of the - * interrupted context. We don't need to save/restore - * them, and they will get spilled if needed. - * - * 2. Updates SP (new a1), allocating the exception stack - * frame in the new window, preserving the old a1 in - * the previous window. - * - * 3. The underscore prefix prevents the assembler from - * automatically aligning the ENTRY instruction on a - * 4-byte boundary, which could create a fatal gap in - * the instruction stream. - * - * At this point, ie. before we re-enable interrupts, we know the caller is - * always live so we can safely modify a1 without using MOVSP (we can use MOVSP - * but it will never cause an ALLOCA or underflow exception here). - * So this is a good point to modify the stack pointer if we want eg. to - * switch to an interrupt stack (if we do, we need to save the current SP - * because certain things have been saved to that exception stack frame). - * We couldn't do this easily before ENTRY, where the caller wasn't - * necessarily live. - * - * NOTE: We don't switch to an interrupt stack here, because exceptions - * are generally caused by executing code -- so we handle exceptions in - * the context of the thread that cause them, and thus remain on the same - * stack. This means a thread's stack must be large enough to handle - * the maximum level of nesting of exceptions that the thread can cause. 
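Further down, the wrapper loads UEXC_exccause, indexes xtos_c_handler_table with it (the addx4 sequence), and calls the selected C handler with the exception frame and the cause, skipping null entries. The C rendering below is an illustration; the handler typedef is an assumption, only the table symbol name comes from the assembly.

    /*
     * Illustration only: C view of the table dispatch performed by
     * xtos_c_wrapper_handler.
     */
    typedef void (*xtos_c_handler_fn)(void *exc_frame, unsigned int exccause);

    extern xtos_c_handler_fn xtos_c_handler_table[];

    static void call_c_handler(void *exc_frame, unsigned int exccause)
    {
            xtos_c_handler_fn fn = xtos_c_handler_table[exccause];

            if (fn)         /* null entry => skip the call ("beqz a12, 1f") */
                    fn(exc_frame, exccause);
    }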
- */ - - // NOTA: exception handlers for certain causes may need interrupts to be kept - // disabled through their dispatch, so they can turn them off themselves at - // the right point (if at all), eg. to save critical state unknown to this - // code here, or for some recovery action that must be atomic with respect - // to interrupts.... - // - // Perhaps two versions of this assembly-level handler are needed, one that restores - // interrupts to what they were before the exception was taken (as here) - // and one that ensures at least low-priority interrupts are kept disabled? - // NOTA: For now, always enable interrupts here. - - /* - * Now we can enable interrupts. - * (Pseudo-CALL is complete, and SP reflects allocation of exception stack frame.) - */ - -#endif /* __XTENSA_CALL0_ABI__ */ - - -#if XCHAL_HAVE_INTERRUPTS - rsr.sar a12 - // Restore PS.INTLEVEL to its saved value (re-enables interrupts - // if they were enabled before taking the exception): - l32i a13, a1, UEXC_ps - rsr.ps a14 - extui a13, a13, 0, 4 // extract saved PS.INTLEVEL - extui a15, a14, 0, 4 // extract current PS.INTLEVEL - xor a14, a14, a15 // clear a14.INTLEVEL - xor a14, a14, a13 // replace with saved PS.INTLEVEL - wsr.ps a14 // restore PS.INTLEVEL -#else - rsr.sar a12 -#endif - - movi a13, xtos_c_handler_table // &table - l32i a15, a1, UEXC_exccause // arg2: exccause - - s32i a12, a1, UEXC_sar - save_loops_mac16 a1, a12, a14 // save LOOP & MAC16 regs, if configured - - addx4 a12, a15, a13 // a12 = table[exccause] - l32i a12, a12, 0 // ... - .global _GeneralException -_GeneralException: // this label makes tracebacks through exceptions look nicer - -#ifdef __XTENSA_CALL0_ABI__ - .global _GeneralExceptionFrameSize - .set _GeneralExceptionFrameSize, ESF_TOTALSIZE - .global _GeneralExceptionRegisterSaveOffset - .set _GeneralExceptionRegisterSaveOffset, UEXC_a0 - mov a2, a1 // arg1: exception parameters - mov a3, a15 // arg2: exccause - beqz a12, 1f // null handler => skip call - callx0 a12 // call C exception handler for this exception -#else - mov a14, a1 // arg1: exception parameters - // mov a15, a15 // arg2: exccause, already in a15 - beqz a12, 1f // null handler => skip call - callx12 a12 // call C exception handler for this exception -#endif - .size _GeneralException, . - _GeneralException -1: - // Now exit the handler. - - - // Restore special registers - - restore_loops_mac16 a1, a13, a14, a15 // restore LOOP & MAC16 regs, if configured - l32i a14, a1, UEXC_sar - - /* - * Disable interrupts while returning from the pseudo-CALL setup above, - * for the same reason they were disabled while doing the pseudo-CALL: - * this sequence restores SP such that it doesn't reflect the allocation - * of the exception stack frame, which we still need to return from - * the exception. - */ - -#if XCHAL_HAVE_INTERRUPTS - rsil a12, XCHAL_EXCM_LEVEL -#endif - wsr.sar a14 - - movi a0, _xtos_return_from_exc -#ifdef __XTENSA_CALL0_ABI__ - jx a0 -#else /* ! __XTENSA_CALL0_ABI__ */ - /* Now return from the pseudo-CALL from the interrupted code, to rotate - * our windows back... */ - - movi a13, 0xC0000000 - //movi a13, 3 - //slli a13, a13, 30 - or a0, a0, a13 // set upper two bits - addx2 a0, a13, a0 // clear upper bit - retw -#endif /* ! __XTENSA_CALL0_ABI__ */ - - /* FIXME: what about _GeneralException ? */ - - .size xtos_c_wrapper_handler, . 
- xtos_c_wrapper_handler - - -#endif /* XCHAL_HAVE_XEA2 && XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/xea2/exc-return.S b/src/arch/xtensa/xtos/xea2/exc-return.S deleted file mode 100644 index 083d78206905..000000000000 --- a/src/arch/xtensa/xtos/xea2/exc-return.S +++ /dev/null @@ -1,114 +0,0 @@ -// exc-return.S - Shared exception/interrupt return code - -// Copyright (c) 2002-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA2 && XCHAL_HAVE_EXCEPTIONS - - .text - .align 4 - .global _xtos_return_from_exc -_xtos_return_from_exc: - -#ifdef __XTENSA_CALL0_ABI__ - - l32i a0, a1, UEXC_a0 // restore general registers, pc, ps - l32i a4, a1, UEXC_a4 - l32i a5, a1, UEXC_a5 - l32i a6, a1, UEXC_a6 - l32i a7, a1, UEXC_a7 - l32i a8, a1, UEXC_a8 - l32i a9, a1, UEXC_a9 - l32i a10, a1, UEXC_a10 - l32i a11, a1, UEXC_a11 - l32i a12, a1, UEXC_a12 - l32i a13, a1, UEXC_a13 - l32i a14, a1, UEXC_a14 - l32i a15, a1, UEXC_a15 - - l32i a2, a1, UEXC_pc - l32i a3, a1, UEXC_ps - wsr.epc1 a2 - wsr.ps a3 - - l32i a2, a1, UEXC_a2 - l32i a3, a1, UEXC_a3 - - rsync // wait for WSR to PS to complete - - addi a1, a1, ESF_TOTALSIZE // restore sp - - rfe - -#else /* ! __XTENSA_CALL0_ABI__ */ - - - // Here we rotated back by N registers, to the interrupted code's register window. - // NOTA: a2 and a3 might contain something useful, but we haven't determined - // what that might be yet (for now, a2 contains nested-C-func call-chain ptr). - - // NOTE: a5 still contains the exception window's exception stack frame pointer. - -# if XTOS_CNEST - s32i a2, a5, ESF_TOTALSIZE-20 // restore nested-C-func call-chain ptr -# endif - l32i a2, a5, UEXC_ps - l32i a3, a5, UEXC_pc - wsr.ps a2 // this sets PS.EXCM - - l32i a2, a5, UEXC_a2 - l32i a4, a5, UEXC_a4 - rsync // wait for WSR to PS to complete - - /* FIXME: Enabling this here may break task-engine builds - * because task engines have exceptions (sort of), but they do - * not have the EPC_1 special register. XCHAL_HAVE_INTERRUPTS - * is incorrect for normal configs without interrupts but with - * exceptions (we still need to restore EPC_1). The correct - * solution is to define XCHAL_HAVE_EXCEPTIONS more strictly - * to mean something like "Have exceptions with - * user/kernel/double vectors" so that task engines are - * excluded. This would be a change to - * . 
*/ - - wsr.epc1 a3 - // HERE: - // - we cannot get window overflows anymore -- we're NOT in a valid windowed context - // - low-priority interrupts are still disabled - - // NOTE: we don't restore EXCCAUSE or EXCVADDR, not needed. - - // Restore a3, a5: - l32i a3, a5, UEXC_a3 - l32i a5, a5, UEXC_a5 - - rfe_rfue - -#endif /* __XTENSA_CALL0_ABI__ */ - - .size _xtos_return_from_exc, . - _xtos_return_from_exc - -#endif /* XCHAL_HAVE_XEA2 && XCHAL_HAVE_EXCEPTIONS */ - diff --git a/src/arch/xtensa/xtos/xea2/int-lowpri-dispatcher.S b/src/arch/xtensa/xtos/xea2/int-lowpri-dispatcher.S deleted file mode 100644 index 02453b5c5a4d..000000000000 --- a/src/arch/xtensa/xtos/xea2/int-lowpri-dispatcher.S +++ /dev/null @@ -1,324 +0,0 @@ -// Level-one interrupt dispatcher (user vectored handler) - -// Copyright (c) 1999-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include - -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA2 && XCHAL_HAVE_EXCEPTIONS && XCHAL_HAVE_INTERRUPTS - -#define _INTERRUPT_LEVEL 1 - - - // NOTE: something equivalent to the following vector is executed - // before entering this handler (see user-vector.S). -//_UserExceptionVector: -// addi a1, a1, -ESF_TOTALSIZE // allocate exception stack frame, etc. -// s32i a2, a1, UEXC_a2 -// s32i a3, a1, UEXC_a3 -// movi a3, xtos_exc_handler_table -// rsr.exccause a2 -// addx4 a2, a2, a3 -// l32i a2, a2, 0 -// s32i a4, a1, UEXC_a4 -// jx a2 // jump to cause-specific handler - - .global _need_user_vector_ // pull-in real user vector (tiny LSP) - - .text - .align 4 - .global _xtos_l1int_handler -_xtos_l1int_handler: - // HERE: a2, a3, a4 have been saved to exception stack frame allocated with a1 (sp). 
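The handler body that follows saves the interrupted context and then loops over pending level-1 interrupts (the .L1_loop0 sequence below): it masks INTERRUPT with INTENABLE and the level-1 mask, isolates one pending bit, clears it, and calls the handler/argument pair from xtos_interrupt_table. The sketch below is a hedged C model of that loop using the Xtensa HAL accessors instead of raw special-register reads; it is not taken from the sources.

    /*
     * Illustration only.  The entry layout mirrors the XIE_HANDLER/XIE_ARG
     * offsets used below; note that the real code may remap the interrupt
     * number (mapint/MAPINT) when the NSA instruction is configured.
     * XCHAL_INTLEVEL1_MASK comes from the per-core configuration headers.
     */
    #include <xtensa/hal.h>

    struct xtos_interrupt_entry {
            void (*handler)(void *arg);
            void *arg;
    };

    extern struct xtos_interrupt_entry xtos_interrupt_table[];

    static void dispatch_level1_interrupts(void)
    {
            unsigned int pending;

            while ((pending = xthal_get_interrupt() & xthal_get_intenable() &
                              XCHAL_INTLEVEL1_MASK) != 0) {
                    unsigned int bit = pending & -pending;  /* one pending bit */
                    unsigned int num = __builtin_ctz(bit);  /* its number     */

                    xthal_set_intclear(bit);  /* ack edge/software interrupts */
                    xtos_interrupt_table[num].handler(xtos_interrupt_table[num].arg);
            }
    }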
- - s32i a5, a1, UEXC_a5 // a5 will get clobbered by ENTRY after pseudo-CALL4 - // (a4..a15 spilled as needed; save if modified) - -#if HAVE_XSR - movi a2, PS_WOECALL4_ABI|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) - xsr.ps a2 - s32i a2, a1, UEXC_ps -#else - rsr.ps a2 - s32i a2, a1, UEXC_ps - movi a2, PS_WOECALL4_ABI|PS_UM|PS_INTLEVEL(XCHAL_EXCM_LEVEL) - wsr.ps a2 -#endif - rsync - - /* store pc */ - rsr.epc1 a2 - s32i a2, a1, UEXC_pc - - /* store rest of the registers */ - s32i a0, a1, UEXC_a0 - s32i a6, a1, UEXC_a6 - s32i a7, a1, UEXC_a7 - s32i a8, a1, UEXC_a8 - s32i a9, a1, UEXC_a9 - s32i a10, a1, UEXC_a10 - s32i a11, a1, UEXC_a11 - s32i a12, a1, UEXC_a12 - s32i a13, a1, UEXC_a13 - s32i a14, a1, UEXC_a14 - s32i a15, a1, UEXC_a15 - - /* store current sp */ - xtos_addr_percore a2, xtos_saved_sp - s32i a1, a2, 0 - - /* store current task sp */ - xtos_task_ctx_percore a2 - beqz a2, no_context - s32i a1, a2, TC_stack_pointer - -no_context: -# if XTOS_CNEST - l32i a2, a1, ESF_TOTALSIZE-20 // save nested-C-func call-chain ptr -# endif - addi a1, a1, ESF_TOTALSIZE -# if XTOS_DEBUG_PC - rsr.epc1 a4 // [for debug] get return PC - movi a5, 0xC0000000 // [for debug] setup call size... - or a4, a5, a4 // [for debug] set upper two bits of return PC - addx2 a4, a5, a4 // [for debug] clear upper bit -# else - movi a4, 0 /* terminate stack frames, overflow check */ -# endif - _entry a1, ESF_TOTALSIZE - -/* Reset the interrupt level to xtos locklevel (lvl 6 on most systems) */ - - rsil a15, XTOS_LOCKLEVEL - -/* Get bit list of pending interrupts at the current interrupt priority level. - * If bit list is empty, interrupt is spurious (can happen if a - * genuine interrupt brings control this direction, but the interrupt - * goes away before we read the INTERRUPT register). Also save off - * sar, loops, mac16 registers and coprocessors. */ - -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp0 - xchal_cp0_store a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - mov a11, a1 - addi a11, a11, UEXC_cp1 - xchal_cp1_store a11, a12, a13, a14, a15 -#endif -#endif - rsr.interrupt a15 - rsr.intenable a12 - movi a13, XCHAL_INTLEVEL1_MASK - and a15, a15, a12 - and a15, a15, a13 - rsr.sar a14 - s32i a14, a1, UEXC_sar - save_loops_mac16 a1, a13, a14 - - /* switch to interrupt stack */ - xtos_int_stack_addr_percore a13, _INTERRUPT_LEVEL, xtos_stack_for_interrupt - s32i a1, a13, 0 - addi a1, a13, SOF_STACK_SIZE - - _beqz a15, LABEL(spurious,int) - - /* set stack base and size for interrupt context */ - xtos_addr_percore a11, xtos_interrupt_ctx - s32i a13, a11, TC_stack_base - movi a13, SOF_STACK_SIZE - s32i a13, a11, TC_stack_size - - /* save task context */ - xtos_task_ctx_percore a13 - xtos_store_percore a13, a14, xtos_saved_ctx - - /* set interrupt task context */ - xtos_task_ctx_store_percore a11, a14 - - xtos_on_wakeup - -/* Loop to handle all pending interrupts. */ - -LABEL(.L1,_loop0): - neg a12, a15 - and a12, a12, a15 - wsr.intclear a12 // clear if edge-trig or s/w or wr/err (else no effect) -#if CONFIG_MULTICORE - xtos_addr_percore a13, xtos_interrupt_table -#else - movi a13, xtos_interrupt_table -#endif - find_ms_setbit a15, a12, a14, 0 - mapint a15 - addx8 a12, a15, a13 - l32i a13, a12, XIE_HANDLER - l32i a14, a12, XIE_ARG - mov a15, a1 - callx12 a13 - - rsr.interrupt a15 - rsr.intenable a12 - movi a13, XCHAL_INTLEVEL1_MASK - and a15, a15, a12 - and a15, a15, a13 - _bnez a15, LABEL(.L1,_loop0) - -/* Restore everything, and return. 
*/ - - /* restore task context if needed */ - xtos_task_ctx_percore a11 - xtos_addr_percore a12, xtos_interrupt_ctx - bne a11, a12, restore_cp - xtos_addr_percore a12, xtos_saved_ctx - xtos_task_ctx_store_percore a12, a11 - -restore_cp: -#if __XCC__ -#if (XCHAL_CP_MASK & CP0_MASK) - xtos_task_ctx_percore a11 - beqz a11, no_context_2 - l32i a11, a11, TC_stack_pointer - addi a11, a11, UEXC_cp0 - xchal_cp0_load a11, a12, a13, a14, a15 -#endif -#if (XCHAL_CP_MASK & CP1_MASK) - xtos_task_ctx_percore a11 - beqz a11, no_context_2 - l32i a11, a11, TC_stack_pointer - addi a11, a11, UEXC_cp1 - xchal_cp1_load a11, a12, a13, a14, a15 -#endif -#endif - -no_context_2: - restore_loops_mac16 a1, a13, a14, a15 - l32i a14, a1, UEXC_sar -LABEL(spurious,int): - -#if XCHAL_HAVE_EXCLUSIVE - // Clear exclusive monitors. - clrex -#endif - - movi a0, LABEL(return,from_exc) - movi a13, 0xC0000000 - wsr.sar a14 - or a0, a0, a13 - addx2 a0, a13, a0 -# if _INTERRUPT_LEVEL < XCHAL_EXCM_LEVEL -/* Raise the interrupt mask before - * returning to avoid a race condition where we deallocate the - * exception stack frame but still have more register values to - * restore from it. */ - rsil a14, XCHAL_EXCM_LEVEL -# endif - retw -LABEL(return,from_exc): - /* a5 contains interrupt stack pointer */ - addi a5, a5, -SOF_STACK_SIZE - l32i a5, a5, 0 - -# if XTOS_CNEST - s32i a2, a5, ESF_TOTALSIZE-20 // restore nested-C-func call-chain ptr -# endif - - /* store sp after returning from handler */ - s32i a1, a5, UEXC_a1 - -restore: - /* load registers for window spill */ - l32i a4, a5, UEXC_a4 - l32i a6, a5, UEXC_a6 - l32i a7, a5, UEXC_a7 - l32i a8, a5, UEXC_a8 - l32i a9, a5, UEXC_a9 - l32i a10, a5, UEXC_a10 - l32i a11, a5, UEXC_a11 - l32i a12, a5, UEXC_a12 - l32i a13, a5, UEXC_a13 - l32i a14, a5, UEXC_a14 - - /* check if switch is needed */ - xtos_addr_percore a2, xtos_saved_sp - xtos_task_ctx_percore a1 - beqz a1, noSwitch - l32i a1, a1, TC_stack_pointer - l32i a0, a2, 0 - beq a0, a1, noSwitch - -doSwitch: - /* store new task sp */ - s32i a1, a2, 0 - - /* restore sp of task being preempted */ - l32i a1, a5, UEXC_a1 - - /* spill register windows to the stack */ - rsr.ps a2 - movi a3, PS_WOE_MASK - xor a2, a2, a3 - wsr.ps a2 - - call0 xthal_window_spill_nw - - /* restore previous ps */ - rsr.ps a2 - movi a3, PS_WOE_MASK - or a2, a2, a3 - wsr.ps a2 - - /* change stack */ - xtos_addr_percore a5, xtos_saved_sp - l32i a5, a5, 0 - j restore - -noSwitch: - /* restore ps and pc */ - l32i a0, a5, UEXC_ps - wsr.ps a0 - rsync - l32i a0, a5, UEXC_pc - wsr.epc1 a0 - - /* restore sar, loops and mac16 registers */ - l32i a0, a5, UEXC_sar - wsr.sar a0 - restore_loops_mac16 a5, a0, a1, a2 - - /* restore rest of the registers */ - l32i a0, a5, UEXC_a0 - l32i a1, a5, UEXC_a1 - l32i a2, a5, UEXC_a2 - l32i a3, a5, UEXC_a3 - l32i a15, a5, UEXC_a15 - l32i a5, a5, UEXC_a5 - rfe - - /* FIXME: what about _LevelOneInterrupt ? */ - .size _xtos_l1int_handler, . - _xtos_l1int_handler - -#endif /* XCHAL_HAVE_XEA2 && XCHAL_HAVE_EXCEPTIONS && XCHAL_HAVE_INTERRUPTS */ diff --git a/src/arch/xtensa/xtos/xea2/intlevel-restore.S b/src/arch/xtensa/xtos/xea2/intlevel-restore.S deleted file mode 100644 index 9c4f4b0b65fd..000000000000 --- a/src/arch/xtensa/xtos/xea2/intlevel-restore.S +++ /dev/null @@ -1,91 +0,0 @@ -// intlevel-restore.S - Interrupt related assembler code - _xtos_restore_intlevel - -// Copyright (c) 2004-2015 Tensilica Inc. 
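On the return path of the level-1 dispatcher above, the code compares the saved stack pointer with the current task context and either unwinds the interrupted frame directly or spills the register windows and resumes on the newly scheduled task's stack. The outline below is an illustration of that decision only; xthal_window_spill() is the C-callable HAL counterpart of the call0 xthal_window_spill_nw used by the assembly.

    /*
     * Illustration only: rough C outline of the noSwitch/doSwitch decision
     * in the dispatcher above.
     */
    #include <xtensa/hal.h>

    static void *select_resume_stack(void *saved_sp, void *new_task_sp)
    {
            if (new_task_sp && new_task_sp != saved_sp) {
                    xthal_window_spill();   /* flush the preempted task's windows */
                    return new_task_sp;     /* continue on the new task's stack   */
            }
            return saved_sp;                /* no switch: unwind interrupted frame */
    }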
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA2 - -/*************************************************************************** - * void _xtos_restore_intlevel(unsigned restoreval); - * - * _xtos_restore_intlevel() restores the current interrupt level - * according to a value returned by _xtos_set_intlevel() or - * _xtos_set_min_intlevel() (or one of the corresponding macros). - * - * NOTE: In XEA2, this function may restore the entire PS register, not - * just the PS.INTLEVEL field. If some other PS field(s) must be changed - * and kept intact across restoring PS.INTLEVEL (this is generally unlikely), - * use the XTOS_RESTORE_JUST_INTLEVEL() macro instead (which is slower). - * - * NOTE: The macro form of this function (XTOS_RESTORE_INTLEVEL()) - * is recommended (for XEA2 configs or where the config is unknown) - * because it may be more efficient. - */ - - .text - .global _xtos_restore_intlevel - .type _xtos_restore_intlevel,@function - .align 4 -_xtos_restore_intlevel: - abi_entry -# if XCHAL_HAVE_INTERRUPTS - wsr a2, PS // restore PS - rsync // wait for WSR to PS to complete -# endif - abi_return - .size _xtos_restore_intlevel, . - _xtos_restore_intlevel - - - -/*************************************************************************** - * _xtos_set_vpri() is used to set the current virtual priority from C code; - * it can be called from the application or from a C interrupt handler. - */ - - .global _xtos_set_vpri - .type _xtos_set_vpri,@function - .align 4 -_xtos_set_vpri: - abi_entry -#if XCHAL_HAVE_INTERRUPTS && XTOS_VIRTUAL_INTENABLE - mov a3, a2 -#if CONFIG_MULTICORE - xtos_addr_percore a4, xtos_intstruct -#else - movi a4, _xtos_intstruct -#endif - xtos_lock a7 // MUST USE highest address register of function to avoid window overflows in critical section - l32i a2, a4, XTOS_VPRI_ENABLED_OFS // return old xtos_vpri_enabled (current vpri) - l32i a5, a4, XTOS_ENABLED_OFS // a3 = xtos_enabled - s32i a3, a4, XTOS_VPRI_ENABLED_OFS // set new xtos_vpri_enabled (current vpri) - and a5, a5, a3 // a5 = xtos_enabled & xtos_vpri_enabled - wsr a5, INTENABLE - xtos_unlock a7 -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - .size _xtos_set_vpri, . 
- _xtos_set_vpri - -#endif /* XEA2 */ - diff --git a/src/arch/xtensa/xtos/xea2/intlevel-set.S b/src/arch/xtensa/xtos/xea2/intlevel-set.S deleted file mode 100644 index 30a82c336fb0..000000000000 --- a/src/arch/xtensa/xtos/xea2/intlevel-set.S +++ /dev/null @@ -1,63 +0,0 @@ -// intlevel-set.S - Interrupt related assembler code - _xtos_set_intlevel - -// Copyright (c) 2004-2015 Tensilica Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA2 - - -/*************************************************************************** - * unsigned _xtos_set_intlevel(int intlevel); - * - * _xtos_set_intlevel() is used to set the current priority from C code; - * it can be called from the application or from a C interrupt handler. - * - * NOTE: This version allows the 'intlevel' parameter to be computed - * at run-time, and thus is longer. It is much more efficient, and - * highly recommented, to use the XTOS_SET_INTLEVEL(intlevel) macro instead - * (which requires a constant intlevel). - */ - - .text - .align 4 - .global _xtos_set_intlevel - .type _xtos_set_intlevel,@function -_xtos_set_intlevel: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - /* In XEA2, we can simply safely set PS.INTLEVEL directly: */ - extui a3, a2, 0, 4 // keep only INTLEVEL bits of parameter - rsr.ps a2 // get old (current) PS.INTLEVEL - movi a4, ~0xF - and a4, a4, a2 // mask out PS.INTLEVEL - or a4, a4, a3 // insert requested INTLEVEL - wsr.ps a4 // update PS.INTLEVEL - rsync // wait for WSR to PS to complete -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - - .size _xtos_set_intlevel, . - _xtos_set_intlevel - -#endif /* XEA2 */ - diff --git a/src/arch/xtensa/xtos/xea2/intlevel-setmin.S b/src/arch/xtensa/xtos/xea2/intlevel-setmin.S deleted file mode 100644 index 0d9f70e18b96..000000000000 --- a/src/arch/xtensa/xtos/xea2/intlevel-setmin.S +++ /dev/null @@ -1,68 +0,0 @@ -// intlevel-setmin.S - Interrupt related assembler code - _xtos_set_min_intlevel - -// Copyright (c) 2004-2015 Tensilica Inc. 
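The two functions above are meant to be used as a pair: _xtos_set_intlevel() returns the previous PS value and _xtos_restore_intlevel() puts it back, with the macro forms recommended when the level is a compile-time constant. The usage sketch below is hypothetical; the prototypes are taken from the comments in the deleted sources and XCHAL_EXCM_LEVEL is just an example level from the core headers.

    /*
     * Hypothetical usage sketch: a short critical section guarded by raising
     * and then restoring PS.INTLEVEL.
     */
    extern unsigned int _xtos_set_intlevel(int intlevel);
    extern void _xtos_restore_intlevel(unsigned int restoreval);

    void bump_shared_counter(volatile int *counter)
    {
            unsigned int old = _xtos_set_intlevel(XCHAL_EXCM_LEVEL);

            (*counter)++;                    /* short critical section          */
            _xtos_restore_intlevel(old);     /* restore PS (or just INTLEVEL)   */
    }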
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include "../xtos-internal.h" - -#if XCHAL_HAVE_XEA2 - - -/*************************************************************************** - * unsigned _xtos_set_min_intlevel(int intlevel); - * - * _xtos_set_min_intlevel() is identical to _xtos_set_intlevel() except - * that it will not lower the current interrupt level. Instead, - * it ensures that the current interrupt level is at least as high - * as specified. - * - * NOTE: This version allows the 'intlevel' parameter to be computed - * at run-time, and thus is longer. It is much more efficient, and - * highly recommented, to use the XTOS_SET_MIN_INTLEVEL(intlevel) macro instead - * (which requires a constant intlevel). - */ - - .text - .align 4 - .global _xtos_set_min_intlevel - .type _xtos_set_min_intlevel,@function -_xtos_set_min_intlevel: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - /* In XEA2, we can simply safely set PS.INTLEVEL directly: */ - extui a3, a2, 0, 4 // keep only INTLEVEL bits of parameter - rsr.ps a2 // get old (current) PS.INTLEVEL - movi a4, ~0xF - extui a5, a2, 0, 4 // look at old PS.INTLEVEL - sub a5, a3, a5 // new.intlevel - old.intlevel - and a4, a4, a2 // mask out PS.INTLEVEL - or a4, a4, a3 // insert requested INTLEVEL - movltz a4, a2, a5 // keep same PS if already higher than requested - wsr.ps a4 // update PS.INTLEVEL - rsync // wait for WSR to PS to complete -#endif /*XCHAL_HAVE_INTERRUPTS*/ - abi_return - - .size _xtos_set_min_intlevel, . - _xtos_set_min_intlevel - -#endif /* XEA2 */ - diff --git a/src/arch/xtensa/xtos/xea2/reloc-vectors.S b/src/arch/xtensa/xtos/xea2/reloc-vectors.S deleted file mode 100644 index 7e9c1760495b..000000000000 --- a/src/arch/xtensa/xtos/xea2/reloc-vectors.S +++ /dev/null @@ -1,120 +0,0 @@ -// reloc-vector.S - Relocatable Vectors section -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/xea2/reloc-vectors.S#1 $ - -// Copyright (c) 2007-2017 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -/* - * This file is only used if the relocatable vectors option is enabled. - */ - - -#include -#include - -#if XCHAL_HAVE_VECBASE - - .section .RelocatableVectors.text, "ax" - - .global _RelocVectors -_RelocVectors: - -//if XCHAL_RESET_VECBASE_OVERLAP ... -# if XSHAL_VECTORS_PACKED \ - && (XCHAL_RESET_VECTOR0_VADDR == XCHAL_VECBASE_RESET_VADDR \ - || XCHAL_RESET_VECTOR1_VADDR == XCHAL_VECBASE_RESET_VADDR) -# define JUMP_TO_RESET 1 - j .Ljump_to_reset -# endif - -# if XCHAL_HAVE_WINDOWED -# define NO_SECTION_DIRECTIVES 1 -# define WINDOW_BASE_VECOFS 0 -# include "window-vectors.S" -# endif - -#if XCHAL_HAVE_DEBUG && XCHAL_HAVE_EXCEPTIONS -# if XCHAL_DEBUGLEVEL == 2 -# define _Level2Vector _DebugExceptionVector -# elif XCHAL_DEBUGLEVEL == 3 -# define _Level3Vector _DebugExceptionVector -# elif XCHAL_DEBUGLEVEL == 4 -# define _Level4Vector _DebugExceptionVector -# elif XCHAL_DEBUGLEVEL == 5 -# define _Level5Vector _DebugExceptionVector -# elif XCHAL_DEBUGLEVEL == 6 -# define _Level6Vector _DebugExceptionVector -# endif -#endif - -# if XCHAL_HAVE_INTERRUPTS -# if XCHAL_NUM_INTLEVELS >= 2 - .org XCHAL_INTLEVEL2_VECOFS - j _Level2Vector -# endif -# if XCHAL_NUM_INTLEVELS >= 3 - .org XCHAL_INTLEVEL3_VECOFS - j _Level3Vector -# endif -# if XCHAL_NUM_INTLEVELS >= 4 - .org XCHAL_INTLEVEL4_VECOFS - j _Level4Vector -# endif -# if XCHAL_NUM_INTLEVELS >= 5 - .org XCHAL_INTLEVEL5_VECOFS - j _Level5Vector -# endif -# if XCHAL_NUM_INTLEVELS >= 6 - .org XCHAL_INTLEVEL6_VECOFS - j _Level6Vector -# endif -# if XCHAL_HAVE_NMI - .org XCHAL_NMI_VECOFS - j _NMIExceptionVector -# endif -# endif -# if XCHAL_HAVE_EXCEPTIONS - .org XCHAL_KERNEL_VECOFS - j _KernelExceptionVector - .org XCHAL_USER_VECOFS - j _UserExceptionVector - .org XCHAL_DOUBLEEXC_VECOFS - j _DoubleExceptionVector -# endif - -// Put literals here. - -// Put actual handlers here. - -# if JUMP_TO_RESET - .align 4 - .literal rvec, _ResetVector -.Ljump_to_reset: - l32r a2, rvec - jx a2 -# endif - - .size _RelocVectors, . - _RelocVectors - - .text - -#endif /* XCHAL_HAVE_VECBASE */ - diff --git a/src/arch/xtensa/xtos/xea2/switch_context.S b/src/arch/xtensa/xtos/xea2/switch_context.S deleted file mode 100644 index b4ed2fafe2af..000000000000 --- a/src/arch/xtensa/xtos/xea2/switch_context.S +++ /dev/null @@ -1,94 +0,0 @@ -/* switch_contexts.S - setup for multiple contexts */ - -/* - * Copyright (c) 2003-2010 Tensilica Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#include -#include - -#if XCHAL_NUM_CONTEXTS > 1 - - -/* - * void _xtos_setup_context(int context_num, StartInfo *info); - */ - .align 4 - .global _xtos_setup_context - .type _xtos_setup_context,@function -_xtos_setup_context: - abi_entry -#if XCHAL_HAVE_INTERRUPTS - rsil a5, 15 /* disable interrupts so we can use EXCSAVE_1 */ -#else - rsr.ps a5 /* just read PS */ -#endif - wsr.excsave1 a3 /* save pointer to new context info */ - s32i a5, a3, INFO_prevps /* save previous PS */ - movi a4, ~0x01F00000 /* mask out PS.CTXT */ - slli a2, a2, 20 /* shift up new PS.CTXT value */ - and a4, a5, a4 - or a4, a4, a2 /* new PS value */ - wsr.ps a4 - rsync - /* We're now in the new context! */ - movi a0, 0 - movi a1, 1 - wsr.windowstart a1 - wsr.windowbase a0 - rsync - rsr.excsave1 a9 /* get pointer to context info */ - movi a0, 0 /* terminate call frames */ - l32i a1, a9, INFO_sp /* get stack pointer */ - l32i a10, a9, INFO_arg1 /* get start function's arguments... */ - l32i a8, a9, INFO_funcpc /* get start function's address */ - /* Okay, now switch back to context zero: */ - l32i a9, a9, INFO_prevps /* retrieve previous PS */ - wsr.ps a9 - rsync - /* Back to original context! */ - abi_return - - .size _xtos_setup_context, . - _xtos_setup_context - - - - /* - * This is the first thing to be executed in the new context - * by explicit setting of PC: - */ - .align 4 - .global _xtos_start_context -_xtos_start_context: -#ifdef __XTENSA_CALL0_ABI__ - Crash the assembler here: I think this is wrong. - callx0 a8 -#else - callx8 a8 -#endif -1: nop - j 1b /* do nothing until context 0 exits */ - .size _xtos_start_context, . - _xtos_start_context - - -#endif /* XCHAL_NUM_CONTEXTS > 1 */ - diff --git a/src/arch/xtensa/xtos/xea2/window-vectors.S b/src/arch/xtensa/xtos/xea2/window-vectors.S deleted file mode 100644 index b6bd0f0974dd..000000000000 --- a/src/arch/xtensa/xtos/xea2/window-vectors.S +++ /dev/null @@ -1,338 +0,0 @@ -// window-vectors-xea2.S - Register Window Overflow/Underflow Handlers for XEA2 -// $Id: //depot/rel/Foxhill/dot.8/Xtensa/OS/xtos/xea2/window-vectors.S#1 $ - -// Copyright (c) 1999-2016 Tensilica Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be included -// in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -#include -#include - -#if XCHAL_HAVE_XEA2 && XCHAL_HAVE_WINDOWED && !defined(__XTENSA_CALL0_ABI__) - -# ifndef NO_SECTION_DIRECTIVES -// Exports -.global _WindowOverflow4 -.global _WindowUnderflow4 -.global _WindowOverflow8 -.global _WindowUnderflow8 -.global _WindowOverflow12 -.global _WindowUnderflow12 -.global _xtos_alloca_handler - - // Note: the current window exception vectors do not generate any - // literals. Hence the literal_prefix directive is not necessary. - // Specifying it "just in case" creates an empty section (named - // ".WindowVectors.literal") which can in some cases cause linking - // problems (the linker scripts don't place it anywhere). - // So leave it commented out: - // - //.begin literal_prefix .WindowVectors - - .section .WindowVectors.text, "ax" -# endif - - -// -// GENERAL NOTES: -// -// These window exception handlers need not be modified. -// They are specific to the windowed call ABI only. -// -// Underflow Handlers: -// -// The underflow handler for returning from call[i+1] to call[i] -// must preserve all the registers from call[i+1]'s window. -// In particular, a0 and a1 must be preserved because the RETW instruction -// will be reexecuted (and may even underflow again if an intervening -// exception has flushed call[i]'s registers). -// Registers a2 and up may contain return values. -// -// The caller could also potentially assume that the callee's a0 and a1 -// (its own a4&a5 if call4, a8&a9 if call8, a12&a13 if call12) -// are correct for whatever reason (not a clean thing to do in general, -// but if it's possible, unless the ABI explicitly prohibits it, -// it will eventually be done :) -- whether the the ABI needs to -// prohibit this is a different question). -// -// Timing of Handlers: -// -// Here is an overview of the overhead of taking a window exception, -// ie. the number of additional cycles taken relative to case where -// an exception is not taken. -// NOTE: these numbers do not take into account any cache misses, -// write buffer stalls, or other external stalls, if they occur. 
-// The totals consist of 5 cycles to enter the handler (or 6 or 7 -// for optional longer pipelines in Xtensa LX), the number of instructions -// and interlocks (2nd and 3rd columns below), and 2 cycles jump delay -// on return (3 cycles for optional longer I-side pipeline in Xtensa LX): -// -// Instruction+bubbles Totals (5-stage) -// XEA1 XEA2 XEA1 XEA2 -// Overflow-4 7 5 14 12 -// Overflow-8 14 10 21 17 -// Overflow-12 18 14 25 21 -// Underflow-4 6 5 13 12 -// Underflow-8 14 10 21 17 -// Underflow-12 18 14 25 21 -// -// Underflow-8 15 12 25 22 (7-stage; could be made 1 less) -// Underflow-12 19 16 29 26 (7-stage; could be made 1 less) - -#ifndef WINDOW_BASE_VECOFS -#define WINDOW_BASE_VECOFS XCHAL_WINDOW_OF4_VECOFS -#endif - - -// 4-Register Window Overflow Vector (Handler) -// -// Invoked if a call[i] referenced a register (a4-a15) -// that contains data from ancestor call[j]; -// call[j] had done a call4 to call[j+1]. -// On entry here: -// window rotated to call[j] start point; -// a0-a3 are registers to be saved; -// a4-a15 must be preserved; -// a5 is call[j+1]'s stack pointer. - - .org XCHAL_WINDOW_OF4_VECOFS - WINDOW_BASE_VECOFS -_WindowOverflow4: - hw_erratum_487_fix - s32e a0, a5, -16 // save a0 to call[j+1]'s stack frame - s32e a1, a5, -12 // save a1 to call[j+1]'s stack frame - s32e a2, a5, -8 // save a2 to call[j+1]'s stack frame - s32e a3, a5, -4 // save a3 to call[j+1]'s stack frame - rfwo // rotates back to call[i] position - - .size _WindowOverflow4, . - _WindowOverflow4 - - -// ALLOCA exception handler -// -// NOTE: The alloca exception handler is squeezed in between the window exception -// handlers in order to save space, and also to allow short-range jumps to the -// window underflow handlers (see below for why). Because of the limited space in -// between the window handlers, this function is split into two to fit. -// -// Code written to the windowed ABI must use the MOVSP instruction to modify -// the stack pointer (except for startup code, which doesn't have a caller). -// The compiler uses MOVSP to allocate very large or variable size stack frames. -// MOVSP guarantees that the caller frame's a0-a3 registers, stored below the -// stack pointer, are moved atomically with respect to interrupts and exceptions -// to satisfy windowed ABI requirements. When user code executes the MOVSP -// instruction and the caller frame is on the stack rather than in the register -// file, the processor takes an ALLOCA exception. -// -// The XTOS user exception dispatcher allocates an exception frame on the -// stack and saves a2-a4 into that frame before calling us. So we need to -// restore those registers and deallocate the stack frame before jumping -// to the window underflow handler - which will restore the spilled registers -// back into the register file. -// The fact the alloca exception was taken means the registers associated with -// the base-save area have been spilled and will be restored by the underflow -// handler, so those 4 registers are available for scratch. - - .align 4 - -_xtos_alloca_handler: - - l32i a2, a1, UEXC_a2 // restore a2-a4 and deallocate frame - l32i a3, a1, UEXC_a3 - l32i a4, a1, UEXC_a4 - addi a1, a1, ESF_TOTALSIZE - wsr.excsave1 a0 // save a0 - rsr.windowbase a0 // grab WINDOWBASE before rotw changes it - rotw -1 // WINDOWBASE goes to a4, new a0-a3 are scratch - rsr.ps a2 - extui a3, a2, XCHAL_PS_OWB_SHIFT, XCHAL_PS_OWB_BITS - xor a3, a3, a4 // bits changed from old to current windowbase - j _xtos_alloca_2 // not enough room here... 
- - .size _xtos_alloca_handler, . - _xtos_alloca_handler - - -// 4-Register Window Underflow Vector (Handler) -// -// Invoked by RETW returning from call[i+1] to call[i] -// where call[i]'s registers must be reloaded (not live in ARs); -// call[i] had done a call4 to call[i+1]. -// On entry here: -// window rotated to call[i] start point; -// a0-a3 are undefined, must be reloaded with call[i].reg[0..3]; -// a4-a15 must be preserved (they are call[i+1].reg[0..11]); -// a5 is call[i+1]'s stack pointer. - - .org XCHAL_WINDOW_UF4_VECOFS - WINDOW_BASE_VECOFS -_WindowUnderflow4: - l32e a0, a5, -16 // restore a0 from call[i+1]'s stack frame - l32e a1, a5, -12 // restore a1 from call[i+1]'s stack frame - l32e a2, a5, -8 // restore a2 from call[i+1]'s stack frame - l32e a3, a5, -4 // restore a3 from call[i+1]'s stack frame - rfwu - - .size _WindowUnderflow4, . - _WindowUnderflow4 - - -// This is the second part of the alloca handler. - - .align 4 - -_xtos_alloca_2: - - rsr.excsave1 a4 // restore original a0 (now in a4) - slli a3, a3, XCHAL_PS_OWB_SHIFT - xor a2, a2, a3 // flip changed bits in old window base - wsr.ps a2 // update PS.OWB to new window base - rsync - _bbci.l a4, 31, _WindowUnderflow4 - rotw -1 // original a0 goes to a8 - _bbci.l a8, 30, _WindowUnderflow8 - rotw -1 - j _WindowUnderflow12 - - .size _xtos_alloca_2, . - _xtos_alloca_2 - - -// 8-Register Window Overflow Vector (Handler) -// -// Invoked if a call[i] referenced a register (a4-a15) -// that contains data from ancestor call[j]; -// call[j] had done a call8 to call[j+1]. -// On entry here: -// window rotated to call[j] start point; -// a0-a7 are registers to be saved; -// a8-a15 must be preserved; -// a9 is call[j+1]'s stack pointer. - - .org XCHAL_WINDOW_OF8_VECOFS - WINDOW_BASE_VECOFS -_WindowOverflow8: - hw_erratum_487_fix - s32e a0, a9, -16 // save a0 to call[j+1]'s stack frame - l32e a0, a1, -12 // a0 <- call[j-1]'s sp (used to find end of call[j]'s frame) - s32e a1, a9, -12 // save a1 to call[j+1]'s stack frame - s32e a2, a9, -8 // save a2 to call[j+1]'s stack frame - s32e a3, a9, -4 // save a3 to call[j+1]'s stack frame - s32e a4, a0, -32 // save a4 to call[j]'s stack frame - s32e a5, a0, -28 // save a5 to call[j]'s stack frame - s32e a6, a0, -24 // save a6 to call[j]'s stack frame - s32e a7, a0, -20 // save a7 to call[j]'s stack frame - rfwo // rotates back to call[i] position - - .size _WindowOverflow8, . - _WindowOverflow8 - - -// 8-Register Window Underflow Vector (Handler) -// -// Invoked by RETW returning from call[i+1] to call[i] -// where call[i]'s registers must be reloaded (not live in ARs); -// call[i] had done a call8 to call[i+1]. -// On entry here: -// window rotated to call[i] start point; -// a0-a7 are undefined, must be reloaded with call[i].reg[0..7]; -// a8-a15 must be preserved (they are call[i+1].reg[0..7]); -// a9 is call[i+1]'s stack pointer. 
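_xtos_alloca_2 above picks the matching underflow handler by testing the top two bits of the original a0: under the windowed ABI those bits record the caller's call increment. The tiny helper below is an illustration of that decode, not code from the sources.

    /*
     * Illustration only: the two most-significant bits of a windowed return
     * address encode the size of the call that created the frame, which is
     * what the _bbci.l tests above examine (bit 31 clear => call4, otherwise
     * bit 30 selects call8 vs. call12).
     */
    static unsigned int caller_call_increment(unsigned int return_a0)
    {
            return return_a0 >> 30;         /* 1 = call4, 2 = call8, 3 = call12 */
    }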
- - .org XCHAL_WINDOW_UF8_VECOFS - WINDOW_BASE_VECOFS -_WindowUnderflow8: - l32e a0, a9, -16 // restore a0 from call[i+1]'s stack frame - l32e a1, a9, -12 // restore a1 from call[i+1]'s stack frame - l32e a2, a9, -8 // restore a2 from call[i+1]'s stack frame - l32e a7, a1, -12 // a7 <- call[i-1]'s sp (used to find end of call[i]'s frame) - l32e a3, a9, -4 // restore a3 from call[i+1]'s stack frame - l32e a4, a7, -32 // restore a4 from call[i]'s stack frame - l32e a5, a7, -28 // restore a5 from call[i]'s stack frame - l32e a6, a7, -24 // restore a6 from call[i]'s stack frame - l32e a7, a7, -20 // restore a7 from call[i]'s stack frame - rfwu - - .size _WindowUnderflow8, . - _WindowUnderflow8 - - -// 12-Register Window Overflow Vector (Handler) -// -// Invoked if a call[i] referenced a register (a4-a15) -// that contains data from ancestor call[j]; -// call[j] had done a call12 to call[j+1]. -// On entry here: -// window rotated to call[j] start point; -// a0-a11 are registers to be saved; -// a12-a15 must be preserved; -// a13 is call[j+1]'s stack pointer. - - .org XCHAL_WINDOW_OF12_VECOFS - WINDOW_BASE_VECOFS -_WindowOverflow12: - hw_erratum_487_fix - s32e a0, a13, -16 // save a0 to call[j+1]'s stack frame - l32e a0, a1, -12 // a0 <- call[j-1]'s sp (used to find end of call[j]'s frame) - s32e a1, a13, -12 // save a1 to call[j+1]'s stack frame - s32e a2, a13, -8 // save a2 to call[j+1]'s stack frame - s32e a3, a13, -4 // save a3 to call[j+1]'s stack frame - s32e a4, a0, -48 // save a4 to end of call[j]'s stack frame - s32e a5, a0, -44 // save a5 to end of call[j]'s stack frame - s32e a6, a0, -40 // save a6 to end of call[j]'s stack frame - s32e a7, a0, -36 // save a7 to end of call[j]'s stack frame - s32e a8, a0, -32 // save a8 to end of call[j]'s stack frame - s32e a9, a0, -28 // save a9 to end of call[j]'s stack frame - s32e a10, a0, -24 // save a10 to end of call[j]'s stack frame - s32e a11, a0, -20 // save a11 to end of call[j]'s stack frame - rfwo // rotates back to call[i] position - - .size _WindowOverflow12, . - _WindowOverflow12 - - -// 12-Register Window Underflow Vector (Handler) -// -// Invoked by RETW returning from call[i+1] to call[i] -// where call[i]'s registers must be reloaded (not live in ARs); -// call[i] had done a call12 to call[i+1]. -// On entry here: -// window rotated to call[i] start point; -// a0-a11 are undefined, must be reloaded with call[i].reg[0..11]; -// a12-a15 must be preserved (they are call[i+1].reg[0..3]); -// a13 is call[i+1]'s stack pointer. - - .org XCHAL_WINDOW_UF12_VECOFS - WINDOW_BASE_VECOFS -_WindowUnderflow12: - l32e a0, a13, -16 // restore a0 from call[i+1]'s stack frame - l32e a1, a13, -12 // restore a1 from call[i+1]'s stack frame - l32e a2, a13, -8 // restore a2 from call[i+1]'s stack frame - l32e a11, a1, -12 // a11 <- call[i-1]'s sp (used to find end of call[i]'s frame) - l32e a3, a13, -4 // restore a3 from call[i+1]'s stack frame - l32e a4, a11, -48 // restore a4 from end of call[i]'s stack frame - l32e a5, a11, -44 // restore a5 from end of call[i]'s stack frame - l32e a6, a11, -40 // restore a6 from end of call[i]'s stack frame - l32e a7, a11, -36 // restore a7 from end of call[i]'s stack frame - l32e a8, a11, -32 // restore a8 from end of call[i]'s stack frame - l32e a9, a11, -28 // restore a9 from end of call[i]'s stack frame - l32e a10, a11, -24 // restore a10 from end of call[i]'s stack frame - l32e a11, a11, -20 // restore a11 from end of call[i]'s stack frame - rfwu - - .size _WindowUnderflow12, . 
- _WindowUnderflow12 - - -# ifndef NO_SECTION_DIRECTIVES - //.end literal_prefix - .text -# endif - - -#endif /* XCHAL_HAVE_XEA2 && XCHAL_HAVE_WINDOWED && !defined(__XTENSA_CALL0_ABI__) */ - diff --git a/src/arch/xtensa/xtos/xtos-internal.h b/src/arch/xtensa/xtos/xtos-internal.h deleted file mode 100644 index d7b4dc8dd772..000000000000 --- a/src/arch/xtensa/xtos/xtos-internal.h +++ /dev/null @@ -1,622 +0,0 @@ -/* - * xtos-internal.h -- internal definitions for single-threaded run-time - * - * Copyright (c) 2003-2010 Tensilica Inc. - * Copyright (c) 2019 Intel Corporation. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTOS_INTERNAL_H -#define XTOS_INTERNAL_H - - -#if CONFIG_MULTICORE -#include -#endif -#include -#include -#include -#include -#include -#ifndef XTOS_PARAMS_H /* this to allow indirect inclusion of this header from the outside */ -#include "xtos-params.h" -#endif - -/* Relative ordering of subpriorities within an interrupt level (or vector): */ -#define XTOS_SPO_ZERO_LO 0 /* lower (eg. zero) numbered interrupts are lower priority than higher numbered interrupts */ -#define XTOS_SPO_ZERO_HI 1 /* lower (eg. zero) numbered interrupts are higher priority than higher numbered interrupts */ - - -/* Sanity check some parameters from xtos-params.h: */ -#if XTOS_LOCKLEVEL < XCHAL_EXCM_LEVEL || XTOS_LOCKLEVEL > 15 -# error Invalid XTOS_LOCKLEVEL value, must be >= EXCM_LEVEL and <= 15, please fix xtos-params.h -#endif - -/* Mask of interrupts locked out at XTOS_LOCKLEVEL: */ -#define XTOS_LOCKOUT_MASK XCHAL_INTLEVEL_ANDBELOW_MASK(XTOS_LOCKLEVEL) -/* Mask of interrupts that can still be enabled at XTOS_LOCKLEVEL: */ -#define XTOS_UNLOCKABLE_MASK (0xFFFFFFFF-XTOS_LOCKOUT_MASK) - -/* Don't set this: */ -#define XTOS_HIGHINT_TRAMP 0 /* mapping high-pri ints to low-pri not auto-supported */ -#define XTOS_VIRTUAL_INTERRUPT XTOS_HIGHINT_TRAMP /* partially-virtualized INTERRUPT register not currently supported */ -#if XTOS_HIGHINT_TRAMP -# error Automatically-generated high-level interrupt trampolines are not presently supported. -#endif - -/* - * If single interrupt at level-one, sub-prioritization is irrelevant: - */ -#if defined(XCHAL_INTLEVEL1_NUM) -# undef XTOS_SUBPRI -# define XTOS_SUBPRI 0 /* override - only one interrupt */ -#endif - -/* - * In XEA1, the INTENABLE special register must be virtualized to provide - * standard XTOS functionality. - * In XEA2, this is only needed for software interrupt prioritization. 
- */ -#if XTOS_SUBPRI || XCHAL_HAVE_XEA1 -#define XTOS_VIRTUAL_INTENABLE 1 -#else -#define XTOS_VIRTUAL_INTENABLE 0 -#endif - -/* - * If single interrupt per priority, then fairness is irrelevant: - */ -#if (XTOS_SUBPRI && !XTOS_SUBPRI_GROUPS) || defined(XCHAL_INTLEVEL1_NUM) -# undef XTOS_INT_FAIRNESS -# define XTOS_INT_FAIRNESS 0 -#endif - -/* Identify special case interrupt handling code in int-lowpri-dispatcher.S: */ -#define XTOS_INT_SPECIALCASE (XTOS_SUBPRI_ORDER == XTOS_SPO_ZERO_HI && XTOS_INT_FAIRNESS == 0 && XTOS_SUBPRI_GROUPS == 0) - -/* - * Determine whether to extend the interrupt entry array: - */ -#define XIE_EXTEND (XTOS_VIRTUAL_INTENABLE && !XTOS_INT_SPECIALCASE) - -/* If we have the NSAU instruction, ordering of interrupts is reversed in xtos_interrupt_table[]: */ -#if XCHAL_HAVE_NSA -# define MAPINT(n) ((XCHAL_NUM_INTERRUPTS-1)-(n)) -# ifdef _ASMLANGUAGE - .macro mapint an - neg \an, \an - addi \an, \an, XCHAL_NUM_INTERRUPTS-1 - .endm -# endif -#else /* no NSA */ -# define MAPINT(n) (n) -# ifdef _ASMLANGUAGE - .macro mapint an - .endm -# endif -#endif - -#define XTOS_TASK_CONTEXT_OFFSET 48 - -#if defined(_ASMLANGUAGE) || defined(__ASSEMBLER__) -/*********** Useful macros ***********/ - -/* - * A useful looping macro: - * 'iterate' invokes 'what' (an instruction, pseudo-op or other macro) - * multiple times, passing it a numbered parameter from 'from' to 'to' - * inclusively. Does not invoke 'what' at all if from > to. - * Maximum difference between 'from' and 'to' is 99 minus nesting depth - * (GNU 'as' doesn't allow nesting deeper than 100). - */ - .macro iterate from, to, what - .ifeq ((\to-\from) & ~0xFFF) - \what \from - iterate "(\from+1)", \to, \what - .endif - .endm // iterate - - - - // rsilft - // - // Execute RSIL \ar, \tolevel if \tolevel is different than \fromlevel. - // This way the RSIL is avoided if we know at assembly time that - // it will not change the level. Typically, this means the \ar register - // is ignored, ie. RSIL is used only to change PS.INTLEVEL. - // - .macro rsilft ar, fromlevel, tolevel -#if XCHAL_HAVE_INTERRUPTS - .if \fromlevel - \tolevel - rsil \ar, \tolevel - .endif -#endif - .endm - - - // Save LOOP and MAC16 registers, if configured, to the exception stack - // frame pointed to by address register \esf, using \aa and \ab as temporaries. - // - // This macro essentially saves optional registers that the compiler uses by - // default when present. - // Note that the acclo/acchi subset of MAC16 may be used even if others - // multipliers are present (e.g. mul16, mul32). - // - // Only two temp registers required for this code to be optimal (no interlocks) in both - // T10xx (Athens) and Xtensa LX microarchitectures (both 5 and 7 stage pipes): - // - .macro save_loops_mac16 esf, aa, ab -#if XCHAL_HAVE_LOOPS - rsr.lcount \aa - rsr.lbeg \ab - s32i \aa, \esf, UEXC_lcount - rsr.lend \aa - s32i \ab, \esf, UEXC_lbeg - s32i \aa, \esf, UEXC_lend -#endif -#if XCHAL_HAVE_MAC16 - rsr.acclo \aa - rsr.acchi \ab - s32i \aa, \esf, UEXC_acclo - s32i \ab, \esf, UEXC_acchi -# if XTOS_SAVE_ALL_MAC16 - rsr.m0 \aa - rsr.m1 \ab - s32i \aa, \esf, UEXC_mr + 0 - s32i \ab, \esf, UEXC_mr + 4 - rsr.m2 \aa - rsr.m3 \ab - s32i \aa, \esf, UEXC_mr + 8 - s32i \ab, \esf, UEXC_mr + 12 -# endif -#endif - .endm - - // Restore LOOP and MAC16 registers, if configured, from the exception stack - // frame pointed to by address register \esf, using \aa, \ab and \ac as temporaries. 
- // - // Three temp registers are required for this code to be optimal (no interlocks) in - // Xtensa LX microarchitectures with 7-stage pipe; otherwise only two - // registers would be needed. - // - .macro restore_loops_mac16 esf, aa, ab, ac -#if XCHAL_HAVE_LOOPS - l32i \aa, \esf, UEXC_lcount - l32i \ab, \esf, UEXC_lbeg - l32i \ac, \esf, UEXC_lend - wsr.lcount \aa - wsr.lbeg \ab - wsr.lend \ac -#endif -#if XCHAL_HAVE_MAC16 - l32i \aa, \esf, UEXC_acclo - l32i \ab, \esf, UEXC_acchi -# if XTOS_SAVE_ALL_MAC16 - l32i \ac, \esf, UEXC_mr + 0 - wsr.acclo \aa - wsr.acchi \ab - wsr.m0 \ac - l32i \aa, \esf, UEXC_mr + 4 - l32i \ab, \esf, UEXC_mr + 8 - l32i \ac, \esf, UEXC_mr + 12 - wsr.m1 \aa - wsr.m2 \ab - wsr.m3 \ac -# else - wsr.acclo \aa - wsr.acchi \ab -# endif -#endif - .endm - - -/* Offsets from _xtos_intstruct structure: */ - .struct 0 -#if XTOS_VIRTUAL_INTENABLE -XTOS_ENABLED_OFS: .space 4 /* _xtos_enabled variable */ -XTOS_VPRI_ENABLED_OFS: .space 4 /* _xtos_vpri_enabled variable */ -#endif -#if XTOS_VIRTUAL_INTERRUPT -XTOS_PENDING_OFS: .space 4 /* _xtos_pending variable */ -#endif - .text - - -#if XTOS_VIRTUAL_INTENABLE - // Update INTENABLE register, computing it as follows: - // INTENABLE = _xtos_enabled & _xtos_vpri_enabled - // [ & ~_xtos_pending ] - // - // Entry: - // register ax = &_xtos_intstruct - // register ay, az undefined (temporaries) - // PS.INTLEVEL set to XTOS_LOCKLEVEL or higher (eg. via xtos_lock) - // window overflows prevented (PS.WOE=0, PS.EXCM=1, or overflows - // already done for registers ax, ay, az) - // - // Exit: - // registers ax, ay, az clobbered - // PS unchanged - // caller needs to SYNC (?) for INTENABLE changes to take effect - // - // Note: in other software prioritization schemes/implementations, - // the term <_xtos_vpri_enabled> in the above expression is often - // replaced with another expression that computes the set of - // interrupts allowed to be enabled at the current software virtualized - // interrupt priority. - // - // For example, a simple alternative implementation of software - // prioritization for XTOS might have been the following: - // INTENABLE = _xtos_enabled & (vpri_enabled | UNLOCKABLE_MASK) - // which removes the need for the interrupt dispatcher to 'or' the - // UNLOCKABLE_MASK bits into _xtos_vpri_enabled, and lets other code - // disable all lockout level interrupts by just clearing _xtos_vpri_enabled - // rather than setting it to UNLOCKABLE_MASK. - // Other implementations sometimes use a table, eg: - // INTENABLE = _xtos_enabled & enable_table[current_vpri] - // The HAL (used by some 3rd party OSes) uses essentially a table-driven - // version, with other tables enabling run-time changing of priorities. 
- // - .macro xtos_update_intenable ax, ay, az - //movi \ax, _xtos_intstruct - l32i \ay, \ax, XTOS_VPRI_ENABLED_OFS // ay = _xtos_vpri_enabled - l32i \az, \ax, XTOS_ENABLED_OFS // az = _xtos_enabled - //interlock - and \az, \az, \ay // az = _xtos_enabled & _xtos_vpri_enabled -# if XTOS_VIRTUAL_INTERRUPT - l32i \ay, \ax, XTOS_PENDING_OFS // ay = _xtos_pending - movi \ax, -1 - xor \ay, \ay, \ax // ay = ~_xtos_pending - and \az, \az, \ay // az &= ~_xtos_pending -# endif - wsr.intenable \az - .endm -#endif /* VIRTUAL_INTENABLE */ - - .macro xtos_lock ax - rsil \ax, XTOS_LOCKLEVEL // lockout - .endm - - .macro xtos_unlock ax - wsr.ps \ax // unlock - rsync - .endm - -/* Offsets to XtosIntHandlerEntry structure fields (see below): */ -# define XIE_HANDLER 0 -# define XIE_ARG 4 -# define XIE_SIZE 8 -# if XIE_EXTEND -# define XIE_VPRIMASK (XIE_SIZE*XCHAL_NUM_INTERRUPTS+0) /* if VIRTUAL_INTENABLE [SUBPRI||XEA1] && !SPECIALCASE */ -# define XIE_LEVELMASK (XIE_SIZE*XCHAL_NUM_INTERRUPTS+4) /* [fairness preloop] if FAIRNESS && SUBPRI [&& SUBPRI_GROUPS] */ -# endif - -/* To simplify code: */ -# if XCHAL_HAVE_NSA -# define IFNSA(a,b) a -# else -# define IFNSA(a,b) b -# endif - - // get_prid ax - // Extracts core id. - .macro get_prid ax -#if XCHAL_HAVE_PRID - rsr.prid \ax - extui \ax, \ax, 0, 8 -#else - movi \ax, PLATFORM_PRIMARY_CORE_ID -#endif - .endm - -#if CONFIG_MULTICORE - // xtos_stack_addr_percore ax, ay, stack_primary, stack_secondary, stack_size - // Retrieves address of end of stack buffer for certain core to register ax. - .macro xtos_stack_addr_percore ax, ay, stack_primary_addr, mem_blk_secondary_addr, stack_size - get_prid \ax - bnei \ax, PLATFORM_PRIMARY_CORE_ID, core_s - movi \ax, \stack_primary_addr - j exit -core_s: - addi \ax, \ax, -1 - movi \ay, _core_s_size - mull \ax, \ax, \ay - movi \ay, (HEAP_SYSTEM_S_SIZE + HEAP_SYS_RUNTIME_S_SIZE) - add \ax, \ax, \ay - movi \ay, \mem_blk_secondary_addr - add \ax, \ax, \ay - j exit -exit: - movi \ay, \stack_size - add \ax, \ax, \ay - .endm - - // xtos_stack_addr_percore_add ax, stack_name, offset - // Pointer to dedicated interrupt stack + offset. - .macro xtos_stack_addr_percore_add ax, stack_name, offset - get_prid \ax - beqz \ax, core_0 - beqi \ax, 1, core_1 - beqi \ax, 2, core_2 - beqi \ax, 3, core_3 - j exit -core_0: - movi \ax, \stack_name\()0 + (\offset) - j exit -core_1: - movi \ax, \stack_name\()1 + (\offset) - j exit -core_2: - movi \ax, \stack_name\()2 + (\offset) - j exit -core_3: - movi \ax, \stack_name\()3 + (\offset) - j exit -exit: - .endm - - // xtos_addr_percore_add ax, symbol, offset - // Pointer to structure per core + offset. - .macro xtos_addr_percore_add ax, symbol, offset - xtos_addr_percore \ax, \symbol - addi \ax, \ax, \offset - .endm - - // xtos_addr_percore_sub ax, symbol, offset - // Pointer to structure per core - offset. - .macro xtos_addr_percore_sub ax, symbol, offset - xtos_addr_percore \ax, \symbol - addi \ax, \ax, -\offset - .endm -#endif /* CONFIG_MULTICORE */ - - // xtos_addr_percore ax, structure_name - // Pointer to structure per core. - .macro xtos_addr_percore ax, structure_name -#if XCHAL_HAVE_THREADPTR - rur.threadptr \ax -#else - j 1f - .align 4 - .literal_position -2: - .word SOF_VIRTUAL_THREAD_BASE -1: - .align 4 - l32r \ax, 2b - l32i \ax, \ax, 0 -#endif - l32i \ax, \ax, XTOS_PTR_TO_\structure_name - .endm - - // xtos_store_percore ax, ay, structure_name - // Stores register value under the selected structure per core. 
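For clarity, the INTENABLE computation that the xtos_update_intenable macro above performs in assembly can be expressed as the following C sketch (not part of the removed source; write_intenable() is a hypothetical stand-in for the wsr.intenable instruction plus the required sync, and the variable names follow the comments above):

#include <stdint.h>

extern uint32_t _xtos_enabled;        /* set of interrupts enabled by software */
extern uint32_t _xtos_vpri_enabled;   /* set enabled at the current virtual priority */
#if XTOS_VIRTUAL_INTERRUPT
extern uint32_t _xtos_pending;        /* virtually-pending interrupts */
#endif

/* Hypothetical helper standing in for "wsr.intenable" and the needed sync. */
void write_intenable(uint32_t mask);

/* Must run with interrupts locked at XTOS_LOCKLEVEL or higher and with
 * window overflows prevented, as the macro's entry conditions require. */
static inline void xtos_update_intenable_c(void)
{
	uint32_t mask = _xtos_enabled & _xtos_vpri_enabled;
#if XTOS_VIRTUAL_INTERRUPT
	mask &= ~_xtos_pending;
#endif
	write_intenable(mask);
}
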
- .macro xtos_store_percore ax, ay, structure_name -#if XCHAL_HAVE_THREADPTR - rur.threadptr \ay -#else - j 1f - .align 4 - .literal_position -2: - .word SOF_VIRTUAL_THREAD_BASE -1: - .align 4 - l32r \ay, 2b - l32i \ay, \ay, 0 -#endif - s32i \ax, \ay, XTOS_PTR_TO_\structure_name - .endm - - // xtos_int_stack_addr_percore ax, int_level, stack_name - // Pointer to dedicated interrupt stack. - .macro xtos_int_stack_addr_percore ax, int_level, stack_name -#if XCHAL_HAVE_THREADPTR - rur.threadptr \ax -#else - j 1f - .align 4 - .literal_position -2: - .word SOF_VIRTUAL_THREAD_BASE -1: - .align 4 - l32r \ax, 2b - l32i \ax, \ax, 0 -#endif - l32i \ax, \ax, XTOS_PTR_TO_\stack_name\()_&int_level - .endm - - // xtos_task_ctx_percore ax - // Pointer to structure per core. - .macro xtos_task_ctx_percore ax -#if XCHAL_HAVE_THREADPTR - rur.threadptr \ax -#else - j 1f - .align 4 - .literal_position -2: - .word SOF_VIRTUAL_THREAD_BASE -1: - .align 4 - l32r \ax, 2b - l32i \ax, \ax, 0 -#endif - l32i \ax, \ax, XTOS_TASK_CONTEXT_OFFSET - .endm - - // xtos_task_ctx_store_percore ax, ay - // Changes task context to point to the selected address. - .macro xtos_task_ctx_store_percore ax, ay -#if XCHAL_HAVE_THREADPTR - rur.threadptr \ay -#else - j 1f - .align 4 - .literal_position -2: - .word SOF_VIRTUAL_THREAD_BASE -1: - .align 4 - l32r \ay, 2b - l32i \ay, \ay, 0 -#endif - s32i \ax, \ay, XTOS_TASK_CONTEXT_OFFSET - .endm - - // Executes optional callback on wake up - .macro xtos_on_wakeup -#if CONFIG_WAKEUP_HOOK - call12 arch_interrupt_on_wakeup -#endif - .endm - -#else /* !_ASMLANGUAGE && !__ASSEMBLER__ */ - -/* - * Interrupt handler table entry. - * Unregistered entries have 'handler' point to xtos_unhandled_interrupt(). - */ -typedef struct XtosIntHandlerEntry { - _xtos_handler handler; - union { - void * varg; - int narg; - } u; -} XtosIntHandlerEntry; -# if XIE_EXTEND -typedef struct XtosIntMaskEntry { - unsigned vpri_mask; /* mask of interrupts enabled when this interrupt is taken */ - unsigned level_mask; /* mask of interrupts at this interrupt's level */ -} XtosIntMaskEntry; -# endif - -#if CONFIG_MULTICORE -struct XtosIntStruct -{ - unsigned xtos_enabled; - unsigned vpri_enabled; -}; - -// XtosIntInterruptTable holds array of interrupt handler descriptors. -struct XtosIntInterruptTable -{ - struct XtosIntHandlerEntry array[XCHAL_NUM_INTERRUPTS]; -}; - -// XtosInterruptStructure describes layout of xtos interrupt structures per core -// generated for certain platform in file interrupt-table.S. -struct XtosInterruptStructure -{ - struct XtosIntStruct xtos_enabled; - struct XtosIntInterruptTable xtos_interrupt_table; - struct XtosIntMaskEntry xtos_interrupt_mask_table[XCHAL_NUM_INTERRUPTS]; - __attribute__((aligned(XCHAL_DCACHE_LINESIZE))) int al[0]; -}; -#endif - -extern void xtos_unhandled_interrupt(); - -#endif /* !_ASMLANGUAGE && !__ASSEMBLER__ */ - -/* - * Notes... - * - * XEA1 and interrupt-SUBPRIoritization both imply virtualization of INTENABLE. - * Synchronous trampoloines imply partial virtualization of the INTERRUPT - * register, which in turn also implies virtualization of INTENABLE register. - * High-level interrupts manipulating the set of enabled interrupts implies - * at least a high XTOS_LOCK_LEVEL, although not necessarily INTENABLE virtualization. 
- * - * With INTENABLE register virtualization, at all times the INTENABLE - * register reflects the expression: - * (set of interrupts enabled) & (set of interrupts enabled by current - * virtual priority) - * - * Unrelated (DBREAK semantics): - * - * A[31-6] = DBA[3-6] - * --------------------- - * A[5-0] & DBC[5-C] & szmask - * - * = DBA[5-0] & szmask - * ^___ ??? - */ - - -/* Report whether the XSR instruction is available (conservative): */ -#define HAVE_XSR (XCHAL_HAVE_XEA2 || !XCHAL_HAVE_EXCEPTIONS) -/* - * This is more accurate, but not a reliable test in software releases prior to 6.0 - * (where the targeted hardware parameter was not explicit in the XPG): - * - *#define HAVE_XSR (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_T1040_0) - */ - - - -/* Macros for supporting hi-level and medium-level interrupt handling. */ - -#if XCHAL_NUM_INTLEVELS > 6 -#error Template files (*-template.S) limit support to interrupt levels <= 6 -#endif - -#if defined(__XTENSA_WINDOWED_ABI__) && XCHAL_HAVE_CALL4AND12 == 0 -#error CALL8-only is not supported! -#endif - -#define INTERRUPT_IS_HI(level) \ - ( XCHAL_HAVE_INTERRUPTS && \ - (XCHAL_EXCM_LEVEL < level) && \ - (XCHAL_NUM_INTLEVELS >= level) && \ - (XCHAL_HAVE_DEBUG ? XCHAL_DEBUGLEVEL != level : 1)) - -#define INTERRUPT_IS_MED(level) \ - (XCHAL_HAVE_INTERRUPTS && (XCHAL_EXCM_LEVEL >= level)) - - -#define _JOIN(x,y) x ## y -#define JOIN(x,y) _JOIN(x,y) - -#define _JOIN3(a,b,c) a ## b ## c -#define JOIN3(a,b,c) _JOIN3(a,b,c) - -#define LABEL(x,y) JOIN3(x,_INTERRUPT_LEVEL,y) -#define EXCSAVE_LEVEL JOIN(EXCSAVE_,_INTERRUPT_LEVEL) -#define INTLEVEL_VSIZE JOIN3(XSHAL_INTLEVEL,_INTERRUPT_LEVEL,_VECTOR_SIZE) - -/* For asm macros; works for positive a,b smaller than 1000: */ -#define GREATERTHAN(a, b) (((b) - (a)) & ~0xFFF) -#define EQUAL(a, b) ((1 << (a)) & (1 << (b))) - -#if CONFIG_MULTICORE -// sizeof(xtos_enabled) -#define XTOS_ENABLED_SIZE_PER_CORE (4) -// sizeof(vpri_enabled) -#define XTOS_VPRI_ENABLED_SIZE_PER_CORE (4) -// sizeof(XtosIntStruct) -#define XTOS_INTSTRUCT_SIZE_PER_CORE (XTOS_ENABLED_SIZE_PER_CORE + \ - XTOS_VPRI_ENABLED_SIZE_PER_CORE) -#endif - -#endif /* XTOS_INTERNAL_H */ - diff --git a/src/arch/xtensa/xtos/xtos-params.h b/src/arch/xtensa/xtos/xtos-params.h deleted file mode 100644 index 334aba76d504..000000000000 --- a/src/arch/xtensa/xtos/xtos-params.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * xtos-params.h -- user-settable parameters for XTOS single-threaded run-time - * - * Copyright (c) 2002, 2004, 2006-2007 Tensilica Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sublicense, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice shall be included - * in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -#ifndef XTOS_PARAMS_H -#define XTOS_PARAMS_H - -/* - * IMPORTANT NOTE. - * This file contains XTOS parameters that may be modified - * according to needs. HOWEVER, any modifications are NOT - * supported. Handling of parameters other than the defaults - * provided in the original version of this file are for - * illustrative and educational purposes only. If you do - * change the parameters here-in (which requires rebuilding - * XTOS), please verify the resulting code extensively - * before even considering its use in production code. - * - * To rebuild XTOS, see instructions in the Xtensa System Software - * Reference Manual. The following sequence is no longer supported. - * - * cd /xtensa-elf/src/handlers - * xt-make clean - * xt-make - * xt-make install - * - * (Note: the last step installs the modified XTOS in *ALL* - * LSPs that normally include XTOS. You may prefer copying - * the generated files to your own custom LSP instead. Or - * better yet, also make a copy of all source files and maintain - * them somewhere completely separate -- which may require - * minor adjustments to the makefile.) - * - * PERFORMANCE TUNING: - * To slightly improve performance of interrupt dispatching, - * you can do some combination of the following: - * - change XTOS_SUBPRI to zero - * - change XTOS_SUBPRI_GROUPS to zero - * - change XTOS_SUBPRI_ORDER to XTOS_SPO_ZERO_HI - * - change XTOS_DEBUG_PC to zero - * - change XTOS_INT_FAIRNESS to zero - * - change XTOS_CNEST to zero - * There are non-trivial trade-offs in making such changes however, - * such as loss of support (see important note above), loss of - * interrupt scheduling fairness, loss of ability to traceback - * interrupt handlers across interrupted code when debugging them, - * loss of supported for nested C functions, etc. - */ - - -/* - * Lower LOCKLEVEL to XCHAL_EXCM_LEVEL for improved interrupt latency - * if you don't register C handlers for high-priority interrupts and your - * high-priority handlers don't touch INTENABLE nor virtual priorities. - * - * XTOS_LOCKLEVEL is less meaningful but still relevant if XEA2 and SUBPRI is zero, - * ie. if INTENABLE doesn't get virtualized (XTOS_VIRTUAL_INTENABLE not set); - * in this case, it is the interrupt level at which INTENABLE accesses are guarded, - * so that interrupt handlers up to this level can safely manipulate INTENABLE. - */ -#define XTOS_LOCKLEVEL XCHAL_NUM_INTLEVELS /* intlevel of INTENABLE register virtualization - (minimum is EXCM_LEVEL) */ - -/* - * NOTE: the following four parameters (SUBPRI, SUBPRI_GROUPS, SUBPRI_ORDER, INT_FAIRNESS) - * are irrelevant and ignored for interrupt vectors to which only one interrupt is mapped. 
- */ - -#define XTOS_SUBPRI 1 /* set to 0 if you don't need sub-prioritization - within level-one interrupts via software; - for XEA2 configs, this might improve performance of - certain sections of code, because INTENABLE register - virtualization becomes unnecessary in this case */ - -/* Ignored unless SUBPRI set: */ -#define XTOS_SUBPRI_GROUPS 1 /* 1 = support selective grouping of interrupts at the same priority */ - -#define XTOS_SUBPRI_ORDER XTOS_SPO_ZERO_LO /* one of XTOS_SPO_ZERO_LO, XTOS_SPO_ZERO_HI */ - -/* Ignored if SUBPRI set but SUBPRI_GROUPS is not (single interrupt per subpri), - * or if single interrupt configured at level/vector: */ -#define XTOS_INT_FAIRNESS 1 /* 1 = enable round-robin/fifo scheduling of interrupt - handlers of a given level or sub-priority */ - - -#define XTOS_DEBUG_PC 1 /* 1 = enable nice stack traceback showing interrupted code - when debugging interrupt or exception handler; - not implemented for high-priority handlers, or - for call0 ABI */ - -#define XTOS_CNEST 1 /* 1 = enable support for nested C functions - (save/restore nested C function call-chain pointer) */ - -/* Current compilers only use ACC (not MRn) when MAC16 is enabled, so you can leave this 0 for performance: */ -#define XTOS_SAVE_ALL_MAC16 0 /* set to save/restore MAC16 MRn registers */ - -/* Setting this might be useful to clear X's in hardware simulation a bit earlier, but - * should not be needed in production code: */ -#define XTOS_RESET_UNNEEDED 0 /* set to reset more registers than are really needed */ - -#endif /* XTOS_PARAMS_H */ - diff --git a/src/arch/xtensa/xtos/xtos-structs.h b/src/arch/xtensa/xtos/xtos-structs.h deleted file mode 100644 index 90f7410f0983..000000000000 --- a/src/arch/xtensa/xtos/xtos-structs.h +++ /dev/null @@ -1,63 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
- * - * Author: Tomasz Lauda - */ - -#ifndef __XTOS_XTOS_STRUCTS_H__ -#define __XTOS_XTOS_STRUCTS_H__ - -#include "xtos-internal.h" -#include -#include - -#include -#include - -struct idc; -struct notify; -struct schedulers; -struct task; - -struct thread_data { - xtos_structures_pointers xtos_ptrs; - volatile xtos_task_context *xtos_active_task; -}; - -struct xtos_core_data { -#if CONFIG_MULTICORE - struct XtosInterruptStructure xtos_int_data; -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_1 - uint8_t xtos_stack_for_interrupt_1[SOF_STACK_SIZE] __aligned(16); -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_2 - uint8_t xtos_stack_for_interrupt_2[SOF_STACK_SIZE] __aligned(16); -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_3 - uint8_t xtos_stack_for_interrupt_3[SOF_STACK_SIZE] __aligned(16); -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_4 - uint8_t xtos_stack_for_interrupt_4[SOF_STACK_SIZE] __aligned(16); -#endif -#if CONFIG_XT_INTERRUPT_LEVEL_5 - uint8_t xtos_stack_for_interrupt_5[SOF_STACK_SIZE] __aligned(16); -#endif - xtos_task_context xtos_interrupt_ctx; - uintptr_t xtos_saved_sp; - struct thread_data *thread_data_ptr; -}; - -struct core_context { - struct thread_data td; - struct task *main_task; - struct schedulers *schedulers; - struct notify *notify; -#ifdef CONFIG_AMS - struct async_message_service *ams; -#endif - struct idc *idc; -}; - -#endif /* __XTOS_XTOS_STRUCTS_H__ */ diff --git a/xtos/include/CMakeLists.txt b/xtos/include/CMakeLists.txt deleted file mode 100644 index fd819740233b..000000000000 --- a/xtos/include/CMakeLists.txt +++ /dev/null @@ -1 +0,0 @@ -target_include_directories(sof_public_headers INTERFACE include) diff --git a/xtos/include/rtos/alloc.h b/xtos/include/rtos/alloc.h deleted file mode 100644 index 1ceaef3e8b8f..000000000000 --- a/xtos/include/rtos/alloc.h +++ /dev/null @@ -1,174 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - * Keyon Jie - */ - -/** - * \file xtos/include/rtos/alloc.h - * \brief Memory Allocation API definition - * \author Liam Girdwood - * \author Keyon Jie - */ - -#ifndef __SOF_LIB_ALLOC_H__ -#define __SOF_LIB_ALLOC_H__ - -#include -#include -#include -#include - -#include -#include - -/** \addtogroup alloc_api Memory Allocation API - * @{ - */ - -/** - * \brief Heap Memory Zones - * - * The heap has three different zones from where memory can be allocated :- - * - * 1) System Zone. Fixed size heap where alloc always succeeds and is never - * freed. Used by any init code that will never give up the memory. - * - * 2) System Runtime Zone. Heap zone intended for runtime objects allocated - * by the kernel part of the code. - * - * 3) Runtime Zone. Main and larger heap zone where allocs are not guaranteed to - * succeed. Memory can be freed here. - * - * 4) Buffer Zone. Largest heap zone intended for audio buffers. - * - * 5) Runtime Shared Zone. Similar to Runtime Zone, but content may be used and - * fred from any enabled core. - * - * 6) System Shared Zone. Similar to System Zone, but content may be used from - * any enabled core. - * - * See platform/memory.h for heap size configuration and mappings. 
- */ -enum mem_zone { - SOF_MEM_ZONE_SYS = 0, /**< System zone */ - SOF_MEM_ZONE_SYS_RUNTIME, /**< System-runtime zone */ - SOF_MEM_ZONE_RUNTIME, /**< Runtime zone */ - SOF_MEM_ZONE_BUFFER, /**< Buffer zone */ - SOF_MEM_ZONE_RUNTIME_SHARED, /**< Runtime shared zone */ - SOF_MEM_ZONE_SYS_SHARED, /**< System shared zone */ -}; - -/** \name Heap zone flags - * @{ - */ - -/** \brief Indicates that original content should not be copied by realloc. */ -#define SOF_MEM_FLAG_NO_COPY BIT(1) -/** \brief Indicates that if we should return uncached address. */ -#define SOF_MEM_FLAG_COHERENT BIT(2) - -/** @} */ - -/** - * Allocates memory block. - * @param zone Zone to allocate memory from, see enum mem_zone. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param caps Capabilities, see SOF_MEM_CAPS_... - * @param bytes Size in bytes. - * @return Pointer to the allocated memory or NULL if failed. - * - * @note Do not use for buffers (SOF_MEM_ZONE_BUFFER zone). - * Use rballoc(), rballoc_align() to allocate memory for buffers. - */ -void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes); - -/** - * Similar to rmalloc(), guarantees that returned block is zeroed. - * - * @note Do not use for buffers (SOF_MEM_ZONE_BUFFER zone). - * rballoc(), rballoc_align() to allocate memory for buffers. - */ -void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes); - -/** - * Allocates memory block from SOF_MEM_ZONE_BUFFER. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param caps Capabilities, see SOF_MEM_CAPS_... - * @param bytes Size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the allocated memory or NULL if failed. - */ -void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, - uint32_t alignment); - -/** - * Similar to rballoc_align(), returns buffer aligned to PLATFORM_DCACHE_ALIGN. - */ -static inline void *rballoc(uint32_t flags, uint32_t caps, size_t bytes) -{ - return rballoc_align(flags, caps, bytes, PLATFORM_DCACHE_ALIGN); -} - -/** - * Changes size of the memory block allocated from SOF_MEM_ZONE_BUFFER. - * @param ptr Address of the block to resize. - * @param flags Flags, see SOF_MEM_FLAG_... - * @param caps Capabilities, see SOF_MEM_CAPS_... - * @param bytes New size in bytes. - * @param old_bytes Old size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the resized memory of NULL if failed. - */ -void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, - size_t old_bytes, uint32_t alignment); - -/** - * Similar to rballoc_align(), returns resized buffer aligned to - * PLATFORM_DCACHE_ALIGN. - */ -static inline void *rbrealloc(void *ptr, uint32_t flags, uint32_t caps, - size_t bytes, size_t old_bytes) -{ - return rbrealloc_align(ptr, flags, caps, bytes, old_bytes, - PLATFORM_DCACHE_ALIGN); -} - -/** - * Frees the memory block. - * @param ptr Pointer to the memory block. - * - * @note Blocks from SOF_MEM_ZONE_SYS cannot be freed, such a call causes - * panic. - */ -void rfree(void *ptr); - -/** - * Allocates memory block from the system heap reserved for the specified core. - * @param core Core id. - * @param bytes Size in bytes. - */ -void *rzalloc_core_sys(int core, size_t bytes); - -/** - * Calculates length of the null-terminated string. - * @param s String. - * @return Length of the string in bytes. - */ -int rstrlen(const char *s); - -/** - * Compares two strings, see man strcmp. - * @param s1 First string to compare. - * @param s2 Second string to compare. 
- * @return See man strcmp. - */ -int rstrcmp(const char *s1, const char *s2); - -static inline void l3_heap_save(void) {} - -/** @}*/ - -#endif /* __SOF_LIB_ALLOC_H__ */ diff --git a/xtos/include/rtos/atomic.h b/xtos/include/rtos/atomic.h deleted file mode 100644 index 6d16b6b6c9e6..000000000000 --- a/xtos/include/rtos/atomic.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifndef __SOF_ATOMIC_H__ -#define __SOF_ATOMIC_H__ - -#include -#include - -static inline void atomic_init(atomic_t *a, int32_t value) -{ - arch_atomic_init(a, value); -} - -static inline int32_t atomic_read(const atomic_t *a) -{ - return arch_atomic_read(a); -} - -static inline void atomic_set(atomic_t *a, int32_t value) -{ - arch_atomic_set(a, value); -} - -static inline int32_t atomic_add(atomic_t *a, int32_t value) -{ - return arch_atomic_add(a, value); -} - -static inline int32_t atomic_sub(atomic_t *a, int32_t value) -{ - return arch_atomic_sub(a, value); -} - -#endif /* __SOF_ATOMIC_H__ */ diff --git a/xtos/include/rtos/cache.h b/xtos/include/rtos/cache.h deleted file mode 100644 index 914e61b52984..000000000000 --- a/xtos/include/rtos/cache.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Tomasz Lauda - */ - -/** - * \file xtos/include/rtos/cache.h - * \brief Cache header file - * \authors Tomasz Lauda - */ - -#ifndef __SOF_LIB_CACHE_H__ -#define __SOF_LIB_CACHE_H__ - -#include - -/* writeback and invalidate data */ -#define CACHE_WRITEBACK_INV 0 - -/* invalidate data */ -#define CACHE_INVALIDATE 1 - -#endif /* __SOF_LIB_CACHE_H__ */ diff --git a/xtos/include/rtos/clk.h b/xtos/include/rtos/clk.h deleted file mode 100644 index e9a36796bc42..000000000000 --- a/xtos/include/rtos/clk.h +++ /dev/null @@ -1,83 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. 
- * - * Author: Liam Girdwood - * Janusz Jankowski - */ - -#ifndef __SOF_LIB_CLK_H__ -#define __SOF_LIB_CLK_H__ - -#include -#include -#include -#include -#include - -struct timer; - -#define CLOCK_NOTIFY_PRE 0 -#define CLOCK_NOTIFY_POST 1 - -struct clock_notify_data { - uint32_t old_freq; - uint32_t old_ticks_per_msec; - uint32_t freq; - uint32_t ticks_per_msec; - uint32_t message; -}; - -struct freq_table { - uint32_t freq; - uint32_t ticks_per_msec; -}; - -struct clock_info { - uint32_t freqs_num; - const struct freq_table *freqs; - uint32_t default_freq_idx; - uint32_t current_freq_idx; - uint32_t lowest_freq_idx; /* lowest possible clock */ - uint32_t notification_id; - uint32_t notification_mask; - - /* persistent change clock value in active state, caller must hold clk_lock */ - int (*set_freq)(int clock, int freq_idx); - - /* temporary change clock - don't modify default clock settings */ - void (*low_power_mode)(int clock, bool enable); -}; - -uint32_t clock_get_freq(int clock); - -void clock_set_freq(int clock, uint32_t hz); - -void clock_low_power_mode(int clock, bool enable); - -uint64_t clock_ms_to_ticks(int clock, uint64_t ms); - -uint64_t clock_us_to_ticks(int clock, uint64_t us); - -uint64_t clock_ns_to_ticks(int clock, uint64_t ns); - -uint64_t clock_ticks_per_sample(int clock, uint32_t sample_rate); - -extern struct k_spinlock clk_lock; - -static inline k_spinlock_key_t clock_lock(void) -{ - return k_spin_lock(&clk_lock); -} - -static inline void clock_unlock(k_spinlock_key_t key) -{ - k_spin_unlock(&clk_lock, key); -} - -static inline struct clock_info *clocks_get(void) -{ - return sof_get()->clocks; -} - -#endif /* __SOF_LIB_CLK_H__ */ diff --git a/xtos/include/rtos/idc.h b/xtos/include/rtos/idc.h deleted file mode 100644 index f0cb09ec6f2f..000000000000 --- a/xtos/include/rtos/idc.h +++ /dev/null @@ -1,180 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Tomasz Lauda - */ - -/** - * \file include/rtos/idc.h - * \brief IDC header file - * \authors Tomasz Lauda - */ - -#ifndef __XTOS_RTOS_IDC_H__ -#define __XTOS_RTOS_IDC_H__ - -#include -#include -#include -#include -#include -#include -#include - -/** \brief IDC send blocking flag. */ -#define IDC_BLOCKING 0 - -/** \brief IDC send non-blocking flag. */ -#define IDC_NON_BLOCKING 1 - -/** \brief IDC send core power up flag. */ -#define IDC_POWER_UP 2 - -/** \brief IDC send core power down flag. */ -#define IDC_POWER_DOWN 3 - -/** \brief IDC send timeout in microseconds. */ -#define IDC_TIMEOUT 10000 - -/** \brief IDC task deadline. */ -#define IDC_DEADLINE 100 - -/** \brief ROM wake version parsed by ROM during core wake up. */ -#define IDC_ROM_WAKE_VERSION 0x2 - -/** \brief IDC message type. */ -#define IDC_TYPE_SHIFT 24 -#define IDC_TYPE_MASK 0x7f -#define IDC_TYPE(x) (((x) & IDC_TYPE_MASK) << IDC_TYPE_SHIFT) - -/** \brief IDC message header. */ -#define IDC_HEADER_MASK 0xffffff -#define IDC_HEADER(x) ((x) & IDC_HEADER_MASK) - -/** \brief IDC message extension. */ -#define IDC_EXTENSION_MASK 0x3fffffff -#define IDC_EXTENSION(x) ((x) & IDC_EXTENSION_MASK) - -/** \brief IDC power up message. */ -#define IDC_MSG_POWER_UP (IDC_TYPE(0x1) | \ - IDC_HEADER(IDC_ROM_WAKE_VERSION)) -#define IDC_MSG_POWER_UP_EXT IDC_EXTENSION(SOF_TEXT_START >> 2) - -/** \brief IDC power down message. */ -#define IDC_MSG_POWER_DOWN IDC_TYPE(0x2) -#define IDC_MSG_POWER_DOWN_EXT IDC_EXTENSION(0x0) - -/** \brief IDC notify message. 
*/ -#define IDC_MSG_NOTIFY IDC_TYPE(0x3) -#define IDC_MSG_NOTIFY_EXT IDC_EXTENSION(0x0) - -/** \brief IDC IPC processing message. */ -#define IDC_MSG_IPC IDC_TYPE(0x4) -#define IDC_MSG_IPC_EXT IDC_EXTENSION(0x0) - -/** \brief IDC component params message. */ -#define IDC_MSG_PARAMS IDC_TYPE(0x5) -#define IDC_MSG_PARAMS_EXT(x) IDC_EXTENSION(x) - -/** \brief IDC component prepare message. */ -#define IDC_MSG_PREPARE IDC_TYPE(0x6) -#define IDC_MSG_PREPARE_EXT(x) IDC_EXTENSION(x) - -/** \brief IDC component trigger message. */ -#define IDC_MSG_TRIGGER IDC_TYPE(0x7) -#define IDC_MSG_TRIGGER_EXT(x) IDC_EXTENSION(x) - -/** \brief IDC component reset message. */ -#define IDC_MSG_RESET IDC_TYPE(0x8) -#define IDC_MSG_RESET_EXT(x) IDC_EXTENSION(x) - -/** \brief IDC prepare D0ix message. */ -#define IDC_MSG_PREPARE_D0ix IDC_TYPE(0x9) -#define IDC_MSG_PREPARE_D0ix_EXT IDC_EXTENSION(0x0) - -/** \brief IDC secondary core crashed notify message. */ -#define IDC_MSG_SECONDARY_CORE_CRASHED IDC_TYPE(0xA) -#define IDC_MSG_SECONDARY_CORE_CRASHED_EXT(x) IDC_EXTENSION(x) - -/** \brief IDC process async msg */ -#define IDC_MSG_AMS IDC_TYPE(0xB) -#define IDC_MSG_AMS_EXT IDC_EXTENSION(0x0) - -#define IDC_MSG_BIND IDC_TYPE(0xD) -#define IDC_MSG_UNBIND IDC_TYPE(0xE) -#define IDC_MSG_GET_ATTRIBUTE IDC_TYPE(0xF) - -#define IDC_HEADER_TO_AMS_SLOT_MASK(x) (x & 0xFFFF) - -/** \brief IDC_MSG_SECONDARY_CORE_CRASHED header fields. */ -#define IDC_SCC_CORE_SHIFT 0 -#define IDC_SCC_CORE_MASK 0xff -#define IDC_SCC_CORE(x) (((x) & IDC_SCC_CORE_MASK) << IDC_SCC_CORE_SHIFT) - -#define IDC_SCC_REASON_SHIFT 8 -#define IDC_SCC_REASON_MASK 0xff -#define IDC_SCC_REASON(x) (((x) & IDC_SCC_REASON_MASK) << IDC_SCC_REASON_SHIFT) - -/** \brief Secondary core crash reasons. */ -#define IDC_SCC_REASON_WATCHDOG 0x00 -#define IDC_SCC_REASON_EXCEPTION 0x01 - -/** \brief Decodes IDC message type. */ -#define iTS(x) (((x) >> IDC_TYPE_SHIFT) & IDC_TYPE_MASK) - -/** \brief Max IDC message payload size in bytes. */ -#define IDC_MAX_PAYLOAD_SIZE (DCACHE_LINE_SIZE * 2) - -/** \brief IDC free function flags */ -#define IDC_FREE_IRQ_ONLY BIT(0) /**< disable only irqs */ - -/** \brief IDC message payload. */ -struct idc_payload { - uint8_t data[IDC_MAX_PAYLOAD_SIZE]; -}; - -/** \brief IDC message. */ -struct idc_msg { - uint32_t header; /**< header value */ - uint32_t extension; /**< extension value */ - uint32_t core; /**< core id */ - uint32_t size; /**< payload size in bytes */ - void *payload; /**< pointer to payload data */ -}; - -/** \brief IDC data. 
*/ -struct idc { - uint32_t busy_bit_mask; /**< busy interrupt mask */ - struct idc_msg received_msg; /**< received message */ - struct task idc_task; /**< IDC processing task */ - struct idc_payload *payload; - int irq; -}; - -/* idc trace context, used by multiple units */ -extern struct tr_ctx idc_tr; - -static inline struct idc_payload *idc_payload_get(struct idc *idc, - uint32_t core) -{ - return idc->payload + core; -} - -void idc_enable_interrupts(int target_core, int source_core); - -void idc_free(uint32_t flags); - -int platform_idc_init(void); - -int platform_idc_restore(void); - -enum task_state idc_do_cmd(void *data); - -void idc_cmd(struct idc_msg *msg); - -int idc_msg_status_get(uint32_t core); - -void idc_init_thread(void); - -#endif /* __XTOS_RTOS_IDC_H__ */ diff --git a/xtos/include/rtos/init.h b/xtos/include/rtos/init.h deleted file mode 100644 index ebe2ffafc062..000000000000 --- a/xtos/include/rtos/init.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause */ -/* - * Copyright(c) 2023 Intel Corporation. All rights reserved. - */ - -#ifndef __XTOS_RTOS_INIT_H__ -#define __XTOS_RTOS_INIT_H__ - -#define SOF_MODULE_INIT(name, init) - -#endif /* __XTOS_RTOS_INIT_H__ */ diff --git a/xtos/include/rtos/interrupt.h b/xtos/include/rtos/interrupt.h deleted file mode 100644 index 58cefae94c81..000000000000 --- a/xtos/include/rtos/interrupt.h +++ /dev/null @@ -1,217 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Janusz Jankowski - */ - -#ifndef __SOF_DRIVERS_INTERRUPT_H__ -#define __SOF_DRIVERS_INTERRUPT_H__ - -#include - -#if !defined(__ASSEMBLER__) && !defined(LINKER) -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/** - * \brief child IRQ descriptor for cascading IRQ controllers. - */ -struct irq_child { - int enable_count[CONFIG_CORE_COUNT]; /**< IRQ enable counter */ - struct list_item list; /**< head for IRQ descriptors, - * sharing this interrupt - */ -}; - -/** - * \brief interrupt client descriptor - */ -struct irq_desc { - int irq; /**< virtual IRQ number */ - void (*handler)(void *arg); /**< interrupt handler function */ - void *handler_arg; /**< interrupt handler argument */ - uint32_t cpu_mask; /**< a mask of CPUs on which this - * interrupt is enabled - */ - struct list_item irq_list; /**< to link to other irq_desc */ -}; - -/** - * \brief cascading IRQ controller operations. - */ -struct irq_cascade_ops { - void (*mask)(struct irq_desc *desc, uint32_t irq, - unsigned int cpu); /**< mask */ - void (*unmask)(struct irq_desc *desc, uint32_t irq, - unsigned int cpu); /**< unmask */ -}; - -/** - * \brief cascading interrupt controller descriptor. 
- */ -struct irq_cascade_desc { - const char *name; /**< name of the - * controller - */ - int irq_base; /**< first virtual IRQ - * number, assigned to - * this controller - */ - const struct irq_cascade_ops *ops; /**< cascading interrupt - * controller driver - * operations - */ - struct irq_desc desc; /**< the interrupt, that - * this controller is - * generating - */ - struct irq_cascade_desc *next; /**< link to the global - * list of interrupt - * controllers - */ - bool global_mask; /**< the controller - * cannot mask input - * interrupts per core - */ - struct k_spinlock lock; /**< protect child - * lists, enable and - * child counters - */ - int enable_count[CONFIG_CORE_COUNT]; /**< enabled child - * interrupt counter - */ - unsigned int num_children[CONFIG_CORE_COUNT]; /**< number of children - */ - struct irq_child child[PLATFORM_IRQ_CHILDREN]; /**< array of child - * lists - one per - * multiplexed IRQ - */ -}; - -/* A descriptor for cascading interrupt controller template */ -struct irq_cascade_tmpl { - const char *name; - const struct irq_cascade_ops *ops; - int irq; - void (*handler)(void *arg); - bool global_mask; -}; - -/** - * \brief Cascading interrupt controller root. - */ -struct cascade_root { - struct k_spinlock lock; /**< locking mechanism */ - struct irq_cascade_desc *list; /**< list of child cascade irqs */ - int last_irq; /**< last registered cascade irq */ -}; - -static inline struct cascade_root *cascade_root_get(void) -{ - return sof_get()->cascade_root; -} - -/* For i.MX, while building SOF with Zephyr use the interrupt_* - * functions from second level interrupt handling and IRQ_STEER. - */ -#if defined(__ZEPHYR__) && (defined(CONFIG_IMX) || defined(CONFIG_AMD)) -int mux_interrupt_get_irq(unsigned int irq, const char *cascade); -int mux_interrupt_register(uint32_t irq, void(*handler)(void *arg), void *arg); -void mux_interrupt_unregister(uint32_t irq, const void *arg); -uint32_t mux_interrupt_enable(uint32_t irq, void *arg); -uint32_t mux_interrupt_disable(uint32_t irq, void *arg); -#endif - -int interrupt_register(uint32_t irq, void(*handler)(void *arg), void *arg); -void interrupt_unregister(uint32_t irq, const void *arg); -uint32_t interrupt_enable(uint32_t irq, void *arg); -uint32_t interrupt_disable(uint32_t irq, void *arg); - -/* Zephyr compat */ -#if !defined(__ZEPHYR__) -#define arch_irq_lock() arch_interrupt_disable_mask(0xffffffff) -#endif - -void platform_interrupt_init(void); - -void platform_interrupt_set(uint32_t irq); -void platform_interrupt_clear(uint32_t irq, uint32_t mask); -uint32_t platform_interrupt_get_enabled(void); -void interrupt_mask(uint32_t irq, unsigned int cpu); -void interrupt_unmask(uint32_t irq, unsigned int cpu); - -/* - * On platforms, supporting cascading interrupts cascaded interrupt numbers - * are greater than or equal to PLATFORM_IRQ_HW_NUM - */ -#define interrupt_is_dsp_direct(irq) (!PLATFORM_IRQ_CHILDREN || \ - irq < PLATFORM_IRQ_HW_NUM) - -void interrupt_init(struct sof *sof); -int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl); -struct irq_cascade_desc *interrupt_get_parent(uint32_t irq); -int interrupt_get_irq(unsigned int irq, const char *cascade); - -static inline void interrupt_set(int irq) -{ - platform_interrupt_set(irq); -} - -static inline void interrupt_clear_mask(int irq, uint32_t mask) -{ - platform_interrupt_clear(irq, mask); -} - -static inline void interrupt_clear(int irq) -{ - interrupt_clear_mask(irq, 1); -} - -static inline uint32_t interrupt_global_disable(void) -{ - return 
arch_interrupt_global_disable(); -} - -static inline void interrupt_global_enable(uint32_t flags) -{ - arch_interrupt_global_enable(flags); -} - -#if CONFIG_LIBRARY - -/* temporary fix to remove build warning for testbench that will need shortly - * realigned when Zephyr native APIs are used. - */ -static inline void __irq_local_disable(unsigned long flags) {} -static inline void __irq_local_enable(unsigned long flags) {} - -/* disables all IRQ sources on current core - NO effect on library */ -#define irq_local_disable(flags) \ - do { \ - flags = 0; \ - __irq_local_disable(flags); \ - } while (0) - -/* re-enables IRQ sources on current core - NO effect on library*/ -#define irq_local_enable(flags) \ - __irq_local_enable(flags) - -#else -/* disables all IRQ sources on current core */ -#define irq_local_disable(flags) \ - (flags = interrupt_global_disable()) - -/* re-enables IRQ sources on current core */ -#define irq_local_enable(flags) \ - interrupt_global_enable(flags) -#endif -#endif -#endif /* __SOF_DRIVERS_INTERRUPT_H__ */ diff --git a/xtos/include/rtos/kernel.h b/xtos/include/rtos/kernel.h deleted file mode 100644 index 23dd42960046..000000000000 --- a/xtos/include/rtos/kernel.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2022 Intel Corporation. All rights reserved. - * - * Author: Jyri Sarha - */ - -#ifndef __XTOS_RTOS_KERNEL_H__ -#define __XTOS_RTOS_KERNEL_H__ - -#include - -#include - -#ifdef __ZEPHYR__ -#error "This file should only be included in XTOS builds." -#endif - -typedef uint32_t k_ticks_t; - -typedef struct { - k_ticks_t ticks; -} k_timeout_t; - -#define Z_TIMEOUT_TICKS(t) ((k_timeout_t) { .ticks = (t) }) - -#define Z_TIMEOUT_US(t) ((k_timeout_t) { .ticks = clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, t) }) - -static inline void k_sleep(k_timeout_t timeout) -{ - wait_delay(timeout.ticks); -} - -static inline void k_msleep(int32_t ms) -{ - wait_delay_ms(ms); -} - -static inline void k_usleep(int32_t us) -{ - wait_delay_us(us); -} - -#endif /* __XTOS_RTOS_KERNEL_H__ */ diff --git a/xtos/include/rtos/mutex.h b/xtos/include/rtos/mutex.h deleted file mode 100644 index 947cde944466..000000000000 --- a/xtos/include/rtos/mutex.h +++ /dev/null @@ -1,43 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2022 Intel Corporation. All rights reserved. - * - */ - -/* - * Simple mutex implementation for SOF. - */ - -#ifndef __XTOS_RTOS_MUTEX_H -#define __XTOS_RTOS_MUTEX_H - -#include -#include -#include - -#define K_FOREVER ((k_timeout_t) { .ticks = 0xffffffff }) - -struct k_mutex { - struct k_spinlock lock; - k_spinlock_key_t key; -}; - -static inline int k_mutex_init(struct k_mutex *mutex) -{ - k_spinlock_init(&mutex->lock); - return 0; -} - -static inline int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout) -{ - mutex->key = k_spin_lock(&mutex->lock); - return 0; -} - -static inline int k_mutex_unlock(struct k_mutex *mutex) -{ - k_spin_unlock(&mutex->lock, mutex->key); - return 0; -} - -#endif diff --git a/xtos/include/rtos/panic.h b/xtos/include/rtos/panic.h deleted file mode 100644 index 921ea710d88f..000000000000 --- a/xtos/include/rtos/panic.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright 2023 NXP - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
- * - * Author: Liam Girdwood - */ - -#ifndef __XTOS_RTOS_PANIC_H__ -#define __XTOS_RTOS_PANIC_H__ - -#include -#include -#include - -#ifdef __ZEPHYR__ -#error "This file should only be included in XTOS builds." -#endif /* __ZEPHYR__ */ - -#ifdef __clang_analyzer__ -#define SOF_NORETURN __attribute__((analyzer_noreturn)) -#elif __GNUC__ -#define SOF_NORETURN __attribute__((noreturn)) -#else -#define SOF_NORETURN -#endif - -#ifndef RELATIVE_FILE -#error "This file requires RELATIVE_FILE to be defined. "\ - "Add it to CMake's target with sof_append_relative_path_definitions." -#endif - -void dump_panicinfo(void *addr, struct sof_ipc_panic_info *panic_info); -void panic_dump(uint32_t p, struct sof_ipc_panic_info *panic_info, - uintptr_t *data) SOF_NORETURN; -void __panic(uint32_t p, const char *filename, uint32_t linenum) SOF_NORETURN; - -/** panic dump filename and linenumber of the call - * - * \param x panic code defined in ipc/trace.h - */ -#define sof_panic(x) __panic((x), (RELATIVE_FILE), (__LINE__)) - -/* runtime assertion */ -#ifndef assert -#define assert(cond) (void)((cond) || (sof_panic(SOF_IPC_PANIC_ASSERT), 0)) -#endif - -#endif /* __XTOS_RTOS_PANIC_H__ */ diff --git a/xtos/include/rtos/sof.h b/xtos/include/rtos/sof.h deleted file mode 100644 index 4214122c9a56..000000000000 --- a/xtos/include/rtos/sof.h +++ /dev/null @@ -1,124 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifndef __XTOS_RTOS_SOF_H__ -#define __XTOS_RTOS_SOF_H__ - -#include -#include -#include -#include - -struct cascade_root; -struct clock_info; -struct comp_driver_list; -struct dai_info; -struct dma_info; -struct dma_trace_data; -struct ipc; -struct ll_schedule_domain; -struct mm; -struct mn; -struct ams_shared_context; -struct notify_data; -struct pm_runtime_data; -struct sa; -struct timer; -struct trace; -struct pipeline_posn; -struct probe_pdata; - -/** - * \brief General firmware context. - * This structure holds all the global pointers, which can potentially - * be accessed by SMP code, hence it should be aligned to platform's - * data cache line size. Alignments in the both beginning and end are needed - * to avoid potential before and after data evictions. 
- */ -struct sof { - /* init data */ - int argc; - char **argv; - - /* ipc */ - struct ipc *ipc; - - /* system agent */ - struct sa *sa; - - /* DMA for Trace*/ - struct dma_trace_data *dmat; - - /* generic trace structure */ - struct trace *trace; - - /* platform clock information */ - struct clock_info *clocks; - - /* default platform timer */ - struct timer *platform_timer; - - /* cpu (arch) timers - 1 per core */ - struct timer *cpu_timers; - - /* timer domain for driving timer LL scheduler */ - struct ll_schedule_domain *platform_timer_domain; - - /* DMA domain for driving DMA LL scheduler */ - struct ll_schedule_domain *platform_dma_domain; - - /* memory map */ - struct mm *memory_map; - - /* runtime power management data */ - struct pm_runtime_data *prd; - -#ifdef CONFIG_AMS - /* asynchronous messaging service */ - struct ams_shared_context *ams_shared_ctx; -#endif - - /* shared notifier data */ - struct notify_data *notify_data; - - /* platform dai information */ - const struct dai_info *dai_info; - - /* platform DMA information */ - const struct dma_info *dma_info; - - /* cascading interrupt controller root */ - struct cascade_root *cascade_root; - - /* list of registered component drivers */ - struct comp_driver_list *comp_drivers; - - /* M/N dividers */ - struct mn *mn; - - /* probes */ - struct probe_pdata *probe; - - /* pipelines stream position */ - struct pipeline_posn *pipeline_posn; - -#ifdef CONFIG_LIBRARY_MANAGER - /* dynamically loaded libraries */ - struct ext_library *ext_library; -#endif - -#if CONFIG_IPC_MAJOR_4 - /* lock for fw_reg access */ - struct k_spinlock fw_reg_lock; -#endif - - __aligned(PLATFORM_DCACHE_ALIGN) int alignment[0]; -} __aligned(PLATFORM_DCACHE_ALIGN); - -struct sof *sof_get(void); - -#endif /* __XTOS_RTOS_SOF_H__ */ diff --git a/xtos/include/rtos/spinlock.h b/xtos/include/rtos/spinlock.h deleted file mode 100644 index 2c5c7d94fdd8..000000000000 --- a/xtos/include/rtos/spinlock.h +++ /dev/null @@ -1,163 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - * Keyon Jie - */ - -/* - * Simple spinlock implementation for SOF. - */ - -#ifndef __XTOS_RTOS_SPINLOCK_H__ -#define __XTOS_RTOS_SPINLOCK_H__ - -#include -typedef uint32_t k_spinlock_key_t; -#include -#include - -#include - -/* - * Lock debugging provides a simple interface to debug deadlocks. The rmbox - * trace output will show an output :- - * - * 0xd70 [41.306406] delta [0.359638] lock eal - * 0xd80 [41.306409] delta [0.000002] value 0x00000000000001b7 - * 0xd90 [41.306411] delta [0.000002] value 0x0000000000000001 - * 0xda0 [41.306413] delta [0.000002] value 0x0000000001000348 - * - * "eal" indicates we are holding a lock with interrupts OFF. The next value - * is the line number of where the lock was acquired. The second number is the - * number of other locks held whilst this lock is held and the subsequent - * numbers list each lock and the line number of it's holder. e.g. to find - * the locks :- - * - * grep -rn lock --include *.c | grep 840 (search for lock at line 0x348) - * src/drivers/dw-dma.c:840: spinlock_init(&dma->lock); - * - * grep -rn lock --include *.c | grep 439 - * src/lib/alloc.c:439: k_spin_lock_irq(&memmap.lock, flags); - * - * Every lock entry and exit shows LcE and LcX in trace alongside the lock - * line numbers in hex. e.g. 
- * - * 0xfd60 [11032.730567] delta [0.000004] lock LcE - * 0xfd70 [11032.730569] delta [0.000002] value 0x00000000000000ae - * - * Deadlock can be confirmed in rmbox :- - * - * Debug log: - * debug: 0x0 (00) = 0xdead0007 (-559087609) |....| - * .... - * Error log: - * using 19.20MHz timestamp clock - * 0xc30 [26.247240] delta [26.245851] lock DED - * 0xc40 [26.247242] delta [0.000002] value 0x00000000000002b4 - * 0xc50 [26.247244] delta [0.000002] value 0x0000000000000109 - * - * DED means deadlock has been detected and the DSP is now halted. The first - * value after DEA is the line number where deadlock occurs and the second - * number is the line number where the lock is allocated. These can be grepped - * like above. - */ - -#if CONFIG_DEBUG_LOCKS - -#include -#include -#include -#include - -#define DBG_LOCK_USERS 8 -#define DBG_LOCK_TRIES 10000 - -extern uint32_t lock_dbg_atomic; -extern uint32_t lock_dbg_user[DBG_LOCK_USERS]; - -extern struct tr_ctx sl_tr; - -/* panic on deadlock */ -#define spin_try_lock_dbg(lock, line) \ - do { \ - int __tries; \ - for (__tries = DBG_LOCK_TRIES; __tries > 0; __tries--) { \ - if (arch_try_lock(lock)) \ - break; /* lock acquired */ \ - } \ - if (__tries == 0) { \ - tr_err_atomic(&sl_tr, "DED"); \ - tr_err_atomic(&sl_tr, "line: %d", line); \ - tr_err_atomic(&sl_tr, "user: %d", (lock)->user); \ - panic(SOF_IPC_PANIC_DEADLOCK); /* lock not acquired */ \ - } \ - } while (0) - -#if CONFIG_DEBUG_LOCKS_VERBOSE -#define spin_lock_log(lock, line) \ - do { \ - if (lock_dbg_atomic) { \ - int __i = 0; \ - int __count = lock_dbg_atomic >= DBG_LOCK_USERS \ - ? DBG_LOCK_USERS : lock_dbg_atomic; \ - tr_err_atomic(&sl_tr, "eal"); \ - tr_err_atomic(&sl_tr, "line: %d", line); \ - tr_err_atomic(&sl_tr, "dbg_atomic: %d", lock_dbg_atomic); \ - for (__i = 0; __i < __count; __i++) { \ - tr_err_atomic(&sl_tr, "value: %d", \ - (lock_dbg_atomic << 24) | \ - lock_dbg_user[__i]); \ - } \ - } \ - } while (0) - -#define spin_lock_dbg(line) \ - do { \ - tr_info(&sl_tr, "LcE"); \ - tr_info(&sl_tr, "line: %d", line); \ - } while (0) - -#define spin_unlock_dbg(line) \ - do { \ - tr_info(&sl_tr, "LcX"); \ - tr_info(&sl_tr, "line: %d", line); \ - } while (0) - -#else /* CONFIG_DEBUG_LOCKS_VERBOSE */ -#define spin_lock_log(lock, line) do {} while (0) -#define spin_lock_dbg(line) do {} while (0) -#define spin_unlock_dbg(line) do {} while (0) -#endif /* CONFIG_DEBUG_LOCKS_VERBOSE */ - -#else /* CONFIG_DEBUG_LOCKS */ - -#define trace_lock(__e) do {} while (0) -#define tracev_lock(__e) do {} while (0) - -#define spin_lock_dbg(line) do {} while (0) -#define spin_unlock_dbg(line) do {} while (0) - -#endif /* CONFIG_DEBUG_LOCKS */ - -/* all SMP spinlocks need init, nothing todo on UP */ -static inline void _spinlock_init(struct k_spinlock *lock, int line) -{ - arch_spinlock_init(lock); -#if CONFIG_DEBUG_LOCKS - lock->user = line; -#endif -} - -#define k_spinlock_init(lock) _spinlock_init(lock, __LINE__) - -/* disables all IRQ sources and takes lock - enter atomic context */ -k_spinlock_key_t _k_spin_lock_irq(struct k_spinlock *lock); -#define k_spin_lock(lock) _k_spin_lock_irq(lock) - -/* re-enables current IRQ sources and releases lock - leave atomic context */ -void _k_spin_unlock_irq(struct k_spinlock *lock, k_spinlock_key_t key, int line); -#define k_spin_unlock(lock, key) _k_spin_unlock_irq(lock, key, __LINE__) - -#endif /* __XTOS_RTOS_SPINLOCK_H__ */ diff --git a/xtos/include/rtos/string.h b/xtos/include/rtos/string.h deleted file mode 100644 index 6959475f8c96..000000000000 --- 
a/xtos/include/rtos/string.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifndef __XTOS_RTOS_STRING_H__ -#define __XTOS_RTOS_STRING_H__ - -#include -#include - -/* C memcpy for arch that don't have arch_memcpy() */ -void cmemcpy(void *dest, void *src, size_t size); -int memcmp(const void *p, const void *q, size_t count); -int rstrlen(const char *s); -int rstrcmp(const char *s1, const char *s2); - -#if defined(arch_memcpy) -#define rmemcpy(dest, src, size) \ - arch_memcpy(dest, src, size) -#else -#define rmemcpy(dest, src, size) \ - cmemcpy(dest, src, size) -#endif - -#endif /* __XTOS_RTOS_STRING_H__ */ diff --git a/xtos/include/rtos/symbol.h b/xtos/include/rtos/symbol.h deleted file mode 100644 index e234945cb789..000000000000 --- a/xtos/include/rtos/symbol.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2023 Intel Corporation. All rights reserved. - */ - -#ifndef __RTOS_SYMBOL_H__ -#define __RTOS_SYMBOL_H__ - -#define EXPORT_SYMBOL(x) - -#endif diff --git a/xtos/include/rtos/task.h b/xtos/include/rtos/task.h deleted file mode 100644 index ebe096a40af1..000000000000 --- a/xtos/include/rtos/task.h +++ /dev/null @@ -1,123 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifndef __XTOS_RTOS_TASK_H__ -#define __XTOS_RTOS_TASK_H__ - -#include -#include -#include -#include -#include -#include -#include - -struct comp_dev; -struct sof; - -/** \brief Predefined LL task priorities. */ -#define SOF_TASK_PRI_HIGH 0 /* priority level 0 - high */ -#define SOF_TASK_PRI_MED 4 /* priority level 4 - medium */ -#define SOF_TASK_PRI_LOW 9 /* priority level 9 - low */ - -/** \brief Predefined EDF task deadlines. */ -#define SOF_TASK_DEADLINE_IDLE UINT64_MAX -#define SOF_TASK_DEADLINE_ALMOST_IDLE (SOF_TASK_DEADLINE_IDLE - 1) -#define SOF_TASK_DEADLINE_NOW 0 - -/** \brief Task counter initial value. */ -#define SOF_TASK_SKIP_COUNT 0xFFFFu - -/** \brief Task states. */ -enum task_state { - SOF_TASK_STATE_INIT = 0, - SOF_TASK_STATE_QUEUED, - SOF_TASK_STATE_PENDING, - SOF_TASK_STATE_RUNNING, - SOF_TASK_STATE_PREEMPTED, - SOF_TASK_STATE_COMPLETED, - SOF_TASK_STATE_FREE, - SOF_TASK_STATE_CANCEL, - SOF_TASK_STATE_RESCHEDULE, -}; - -/** \brief Task operations. */ -struct task_ops { - enum task_state (*run)(void *data); /**< task's main operation */ - void (*complete)(void *data); /**< executed on completion */ - uint64_t (*get_deadline)(void *data); /**< returns current deadline */ -}; - -/** \brief Task used by schedulers. 
*/ -struct task { - uint64_t start; /**< start time in [ms] since now (LL only) */ - const struct sof_uuid_entry *uid; /**< Uuid */ - uint16_t type; /**< type of the task (LL or EDF) */ - uint16_t priority; /**< priority of the task (used by LL) */ - uint16_t core; /**< execution core */ - uint16_t flags; /**< custom flags */ - enum task_state state; /**< current state */ - void *data; /**< custom data passed to all ops */ - struct list_item list; /**< used by schedulers to hold tasks */ - void *priv_data; /**< task private data */ - struct task_ops ops; /**< task operations */ -#if defined(CONFIG_SCHEDULE_LOG_CYCLE_STATISTICS) - uint32_t cycles_sum; - uint32_t cycles_max; - uint32_t cycles_cnt; -#endif -#if CONFIG_PERFORMANCE_COUNTERS - struct perf_cnt_data pcd; -#endif -}; - -static inline bool task_is_active(struct task *task) -{ - switch (task->state) { - case SOF_TASK_STATE_QUEUED: - case SOF_TASK_STATE_PENDING: - case SOF_TASK_STATE_RUNNING: - case SOF_TASK_STATE_PREEMPTED: - case SOF_TASK_STATE_RESCHEDULE: - return true; - default: - return false; - } -} - -static inline enum task_state task_run(struct task *task) -{ - assert(task->ops.run); - - return task->ops.run(task->data); -} - -static inline void task_complete(struct task *task) -{ - if (task->ops.complete) - task->ops.complete(task->data); -} - -static inline uint64_t task_get_deadline(struct task *task) -{ - assert(task->ops.get_deadline); - - return task->ops.get_deadline(task->data); -} - -enum task_state task_main_primary_core(void *data); - -enum task_state task_main_secondary_core(void *data); - -void task_main_init(void); - -void task_main_free(void); - -int task_main_start(struct sof *sof); -int start_complete(void); - -#endif /* __XTOS_RTOS_TASK_H__ */ diff --git a/xtos/include/rtos/timer.h b/xtos/include/rtos/timer.h deleted file mode 100644 index b9756b66c87c..000000000000 --- a/xtos/include/rtos/timer.h +++ /dev/null @@ -1,132 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
- * - * Author: Janusz Jankowski - */ - -#ifndef __SOF_DRIVERS_TIMER_H__ -#define __SOF_DRIVERS_TIMER_H__ - -#include -#include -#include -#include -#include -#include - -struct comp_dev; -struct sof_ipc_stream_posn; - -#define TIMER0 0 -#define TIMER1 1 -#define TIMER2 2 -#define TIMER3 3 -#define TIMER4 4 - -int timer_register(struct timer *timer, void (*handler)(void *arg), void *arg); -void timer_unregister(struct timer *timer, void *arg); -void timer_enable(struct timer *timer, void *arg, int core); -void timer_disable(struct timer *timer, void *arg, int core); - -static inline struct timer *timer_get(void) -{ - return sof_get()->platform_timer; -} - -static inline struct timer *cpu_timer_get(void) -{ - return &(sof_get()->cpu_timers[cpu_get_id()]); -} - -static inline int64_t timer_set(struct timer *timer, uint64_t ticks) -{ - return arch_timer_set(timer, ticks); -} - -void timer_set_ms(struct timer *timer, unsigned int ms); - -static inline void timer_clear(struct timer *timer) -{ - arch_timer_clear(timer); -} - -unsigned int timer_get_count(struct timer *timer); - -unsigned int timer_get_count_delta(struct timer *timer); - -static inline uint64_t timer_get_system(struct timer *timer) -{ - return arch_timer_get_system(timer); -} - -int64_t platform_timer_set(struct timer *timer, uint64_t ticks); -void platform_timer_clear(struct timer *timer); -uint64_t platform_timer_get(struct timer *timer); -uint64_t platform_timer_get_atomic(struct timer *timer); - -static inline uint64_t platform_safe_get_time(struct timer *timer) -{ - /* Default to something small but at least 1.0 microsecond so it - * does not look like an uninitialized zero; not even when the - * user does not request any microseconds decimals. See - * DEFAULT_CLOCK constant in logger.c - */ - return timer ? 
platform_timer_get(timer) : 50; -} - -void platform_timer_start(struct timer *timer); -void platform_timer_stop(struct timer *timer); - -static inline uint64_t k_ms_to_cyc_ceil64(uint64_t ms) -{ - return clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, ms); -} - -static inline uint64_t k_us_to_cyc_ceil64(uint64_t us) -{ - return clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, us); -} - -static inline uint64_t k_ns_to_cyc_near64(uint64_t ns) -{ - return clock_ns_to_ticks(PLATFORM_DEFAULT_CLOCK, ns); -} - -static inline uint64_t k_cyc_to_ms_near64(uint64_t ticks) -{ - return ticks / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); -} - -static inline uint64_t k_cyc_to_us_near64(uint64_t ticks) -{ - return ticks / clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); -} - -static inline uint64_t sof_cycle_get_64(void) -{ - return platform_timer_get(timer_get()); -} - -static inline uint64_t sof_cycle_get_64_atomic(void) -{ - return platform_timer_get_atomic(timer_get()); -} - -static inline uint64_t sof_cycle_get_64_safe(void) -{ - return platform_safe_get_time(timer_get()); -} - -/* get timestamp for host stream DMA position */ -void platform_host_timestamp(struct comp_dev *host, - struct sof_ipc_stream_posn *posn); - -/* get timestamp for DAI stream DMA position */ -void platform_dai_timestamp(struct comp_dev *dai, - struct sof_ipc_stream_posn *posn); - -/* get current wallclock for componnent */ -void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock); - -#endif /* __SOF_DRIVERS_TIMER_H__ */ diff --git a/xtos/include/rtos/wait.h b/xtos/include/rtos/wait.h deleted file mode 100644 index 03bbcadd506d..000000000000 --- a/xtos/include/rtos/wait.h +++ /dev/null @@ -1,67 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -/* - * Simple wait for event completion and signaling with timeouts. - */ - -#ifndef __XTOS_RTOS_WAIT_H__ -#define __XTOS_RTOS_WAIT_H__ - -#include -#include - -#if !CONFIG_LIBRARY -#include -#include -#include -#include -#include -#include - -extern struct tr_ctx wait_tr; - -static inline void wait_for_interrupt(int level) -{ - LOG_MODULE_DECLARE(wait, CONFIG_SOF_LOG_LEVEL); - - tr_dbg(&wait_tr, "WFE"); -#if CONFIG_DEBUG_LOCKS - if (lock_dbg_atomic) - tr_err_atomic(&wait_tr, "atm"); -#endif - platform_wait_for_interrupt(level); - tr_dbg(&wait_tr, "WFX"); -} - -/** - * \brief Waits at least passed number of clocks. - * \param[in] number_of_clks Minimum number of clocks to wait. - */ -void wait_delay(uint64_t number_of_clks); - -/** - * \brief Waits at least passed number of milliseconds. - * \param[in] ms Minimum number of milliseconds to wait. - */ -void wait_delay_ms(uint64_t ms); - -/** - * \brief Waits at least passed number of microseconds. - * \param[in] us Minimum number of microseconds to wait. - */ -void wait_delay_us(uint64_t us); -#else -static inline void wait_delay(uint64_t number_of_clks) {} -static inline void wait_delay_ms(uint64_t ms) {} -static inline void wait_delay_us(uint64_t us) {} -#endif - -int poll_for_register_delay(uint32_t reg, uint32_t mask, - uint32_t val, uint64_t us); - -#endif /* __XTOS_RTOS_WAIT_H__ */ diff --git a/xtos/include/sof/compiler_info.h b/xtos/include/sof/compiler_info.h deleted file mode 100644 index 9716575b7188..000000000000 --- a/xtos/include/sof/compiler_info.h +++ /dev/null @@ -1,19 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. 
- * - * Author: Karol Trzcinski - */ - -/** - * \file xtos/include/sof/compiler_info.h - * \brief Compiler version and name descriptor - * \author Karol Trzcinski - */ - -#ifndef __SOF_COMPILER_INFO_H__ -#define __SOF_COMPILER_INFO_H__ - -#include - -#endif /* __SOF_COMPILER_INFO_H__ */ diff --git a/xtos/include/sof/init.h b/xtos/include/sof/init.h deleted file mode 100644 index ce8522cd34ad..000000000000 --- a/xtos/include/sof/init.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - */ - -#ifndef __SOF_INIT_H__ -#define __SOF_INIT_H__ - -struct sof; - -/* main firmware entry point - argc and argv not currently used */ -#ifndef CONFIG_ARCH_POSIX -int main(int argc, char *argv[]); -#endif - -#if CONFIG_MULTICORE - -int secondary_core_init(struct sof *sof); - -#endif /* CONFIG_MULTICORE */ - -int arch_init(void); - -#endif /* __SOF_INIT_H__ */ diff --git a/xtos/include/sof/lib/cpu.h b/xtos/include/sof/lib/cpu.h deleted file mode 100644 index d53e15e7535a..000000000000 --- a/xtos/include/sof/lib/cpu.h +++ /dev/null @@ -1,93 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. - * - * Author: Tomasz Lauda - */ - -/** - * \file xtos/include/sof/lib/cpu.h - * \brief CPU header file - * \authors Tomasz Lauda - */ - -#ifndef __SOF_LIB_CPU_H__ -#define __SOF_LIB_CPU_H__ -#ifndef __ZEPHYR__ - -#include - -#if !defined(__ASSEMBLER__) && !defined(LINKER) - -#include -#include - -/* let the compiler optimise when in single core mode */ -#if CONFIG_CORE_COUNT == 1 - -static inline int cpu_get_id(void) -{ - return 0; -} - -static inline bool cpu_is_primary(int id) -{ - return 1; -} - -static inline bool cpu_is_me(int id) -{ - return 1; -} - -#else - -static inline int cpu_get_id(void) -{ - return arch_cpu_get_id(); -} - -static inline bool cpu_is_primary(int id) -{ - return id == PLATFORM_PRIMARY_CORE_ID; -} - -static inline bool cpu_is_me(int id) -{ - return id == cpu_get_id(); -} -#endif - -static inline int cpu_enable_core(int id) -{ - return arch_cpu_enable_core(id); -} - -static inline void cpu_disable_core(int id) -{ - arch_cpu_disable_core(id); -} - -static inline int cpu_is_core_enabled(int id) -{ - return arch_cpu_is_core_enabled(id); -} - -static inline int cpu_enabled_cores(void) -{ - return arch_cpu_enabled_cores(); -} - -static inline int cpu_restore_secondary_cores(void) -{ - return arch_cpu_restore_secondary_cores(); -} - -static inline int cpu_secondary_cores_prepare_d0ix(void) -{ - return arch_cpu_secondary_cores_prepare_d0ix(); -} - -#endif -#endif -#endif /* __SOF_LIB_CPU_H__ */ diff --git a/xtos/include/sof/lib/dai.h b/xtos/include/sof/lib/dai.h deleted file mode 100644 index d1fd463eef3f..000000000000 --- a/xtos/include/sof/lib/dai.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. 
- * - * Author: Liam Girdwood - * Keyon Jie - */ - -/** - * \file xtos/include/sof/lib/dai.h - * \brief DAI Drivers definition - * \author Liam Girdwood - * \author Keyon Jie - */ - -#ifdef __ZEPHYR__ -#error "Please use zephyr/include/sof/lib/dai.h instead" -#endif - -#ifndef __SOF_LIB_DAI_H__ -#define __SOF_LIB_DAI_H__ - -#include - -#endif /* __SOF_LIB_DAI_H__ */ diff --git a/xtos/include/sof/lib/dma.h b/xtos/include/sof/lib/dma.h deleted file mode 100644 index c11c6c75bd72..000000000000 --- a/xtos/include/sof/lib/dma.h +++ /dev/null @@ -1,597 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Liam Girdwood - * Keyon Jie - */ - -/** - * \file xtos/include/sof/lib/dma.h - * \brief DMA Drivers definition - * \author Liam Girdwood - * \author Keyon Jie - */ - -#ifndef __SOF_LIB_DMA_H__ -#define __SOF_LIB_DMA_H__ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __ZEPHYR__ -#error "Please use zephyr/include/sof/lib/dma.h instead" -#endif - -struct comp_buffer; -struct comp_dev; - -/** \addtogroup sof_dma_drivers DMA Drivers - * DMA Drivers API specification. - * @{ - */ - -/* DMA direction bitmasks used to define DMA copy direction */ -#define DMA_DIR_MEM_TO_MEM BIT(0) /**< local memory copy */ -#define DMA_DIR_HMEM_TO_LMEM BIT(1) /**< host memory to local mem copy */ -#define DMA_DIR_LMEM_TO_HMEM BIT(2) /**< local mem to host mem copy */ -#define DMA_DIR_MEM_TO_DEV BIT(3) /**< local mem to dev copy */ -#define DMA_DIR_DEV_TO_MEM BIT(4) /**< dev to local mem copy */ -#define DMA_DIR_DEV_TO_DEV BIT(5) /**< dev to dev copy */ -#define SOF_DMA_DIR_MEM_TO_MEM DMA_DIR_MEM_TO_MEM -#define SOF_DMA_DIR_HMEM_TO_LMEM DMA_DIR_HMEM_TO_LMEM -#define SOF_DMA_DIR_LMEM_TO_HMEM DMA_DIR_LMEM_TO_HMEM -#define SOF_DMA_DIR_MEM_TO_DEV DMA_DIR_MEM_TO_DEV -#define SOF_DMA_DIR_DEV_TO_MEM DMA_DIR_DEV_TO_MEM -#define SOF_DMA_DIR_DEV_TO_DEV DMA_DIR_DEV_TO_DEV - -/* DMA capabilities bitmasks used to define the type of DMA */ -#define DMA_CAP_HDA BIT(0) /**< HDA DMA */ -#define DMA_CAP_GP_LP BIT(1) /**< GP LP DMA */ -#define DMA_CAP_GP_HP BIT(2) /**< GP HP DMA */ -#define DMA_CAP_BT BIT(3) /**< BT DMA */ -#define DMA_CAP_SP BIT(4) /**< SP DMA */ -#define DMA_CAP_DMIC BIT(5) /**< ACP DMA DMIC > */ -#define DMA_CAP_SP_VIRTUAL BIT(6) /**< SP VIRTUAL DMA */ -#define DMA_CAP_HS_VIRTUAL BIT(7) /**< HS VIRTUAL DMA */ -#define DMA_CAP_HS BIT(8) /**< HS DMA */ -#define DMA_CAP_SW BIT(9) /**< SW DMA */ - -/* DMA dev type bitmasks used to define the type of DMA */ - -#define DMA_DEV_HOST BIT(0) /**< connectable to host */ -#define DMA_DEV_HDA BIT(1) /**< connectable to HD/A link */ -#define DMA_DEV_SSP BIT(2) /**< connectable to SSP fifo */ -#define DMA_DEV_DMIC BIT(3) /**< connectable to DMIC fifo */ -#define DMA_DEV_SSI BIT(4) /**< connectable to SSI / SPI fifo */ -#define DMA_DEV_ALH BIT(5) /**< connectable to ALH link */ -#define DMA_DEV_SAI BIT(6) /**< connectable to SAI fifo */ -#define DMA_DEV_ESAI BIT(7) /**< connectable to ESAI fifo */ -#define DMA_DEV_BT BIT(8) /**< connectable to ACP BT I2S */ -#define DMA_DEV_SP BIT(9) /**< connectable to ACP SP I2S */ -#define DMA_DEV_AFE_MEMIF BIT(10) /**< connectable to AFE fifo */ -#define DMA_DEV_SP_VIRTUAL BIT(11) /**< connectable to ACP SP VIRTUAL I2S */ -#define DMA_DEV_HS_VIRTUAL BIT(12) /**< connectable to ACP HS VIRTUAL I2S */ -#define DMA_DEV_HS BIT(13) /**< connectable to ACP HS I2S */ -#define DMA_DEV_MICFIL BIT(14) /**< 
connectable to MICFIL fifo */ -#define DMA_DEV_SW BIT(15) /**< connectable to ACP SW */ -#define SOF_DMA_DEV_HOST DMA_DEV_HOST -#define SOF_DMA_DEV_SAI DMA_DEV_SAI -#define SOF_DMA_DEV_ESAI DMA_DEV_ESAI -#define SOF_DMA_DEV_MICFIL DMA_DEV_MICFIL -#define SOF_DMA_DEV_AFE_MEMIF DMA_DEV_AFE_MEMIF - -/* DMA access privilege flag */ -#define DMA_ACCESS_EXCLUSIVE 1 -#define DMA_ACCESS_SHARED 0 -#define SOF_DMA_ACCESS_EXCLUSIVE DMA_ACCESS_EXCLUSIVE -#define SOF_DMA_ACCESS_SHARED DMA_ACCESS_SHARED - -/* DMA copy flags */ -#define DMA_COPY_BLOCKING BIT(0) -#define DMA_COPY_ONE_SHOT BIT(1) - -/* We will use this enum in cb handler to inform dma what - * action we need to perform. - */ -enum dma_cb_status { - DMA_CB_STATUS_RELOAD = 0, - DMA_CB_STATUS_END, -}; - -#define SOF_DMA_CB_STATUS_RELOAD DMA_CB_STATUS_RELOAD -#define SOF_DMA_CB_STATUS_END DMA_CB_STATUS_END - -/* DMA interrupt commands */ -enum dma_irq_cmd { - DMA_IRQ_STATUS_GET = 0, - DMA_IRQ_CLEAR, - DMA_IRQ_MASK, - DMA_IRQ_UNMASK -}; - -#define DMA_CHAN_INVALID 0xFFFFFFFF -#define DMA_CORE_INVALID 0xFFFFFFFF -#define SOF_DMA_CHAN_INVALID DMA_CHAN_INVALID -#define SOF_DMA_CORE_INVALID DMA_CORE_INVALID - -/* DMA attributes */ -#define DMA_ATTR_BUFFER_ALIGNMENT 0 -#define DMA_ATTR_COPY_ALIGNMENT 1 -#define DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT 2 -#define DMA_ATTR_BUFFER_PERIOD_COUNT 3 - -struct dma; - -/** - * \brief Element of SG list (as array item). - */ -struct dma_sg_elem { - uint32_t src; /**< source address */ - uint32_t dest; /**< destination address */ - uint32_t size; /**< size (in bytes) */ -}; - -/** - * \brief Data used in DMA callbacks. - */ -struct dma_cb_data { - struct dma_chan_data *channel; - struct dma_sg_elem elem; - enum dma_cb_status status; -}; - -/** - * \brief SG elem array. - */ -struct dma_sg_elem_array { - uint32_t count; /**< number of elements in elems */ - struct dma_sg_elem *elems; /**< elements */ -}; - -/* DMA physical SG params */ -struct dma_sg_config { - uint32_t src_width; /* in bytes */ - uint32_t dest_width; /* in bytes */ - uint32_t burst_elems; - uint32_t direction; - uint32_t src_dev; - uint32_t dest_dev; - uint32_t cyclic; /* circular buffer */ - uint64_t period; - struct dma_sg_elem_array elem_array; /* array of dma_sg elems */ - bool scatter; - bool irq_disabled; - /* true if configured DMA channel is the scheduling source */ - bool is_scheduling_source; -}; - -struct dma_chan_status { - uint32_t state; - uint32_t flags; - uint32_t w_pos; - uint32_t r_pos; - uint32_t timestamp; - - /* dma position info for ipc4 */ - void *ipc_posn_data; -}; - -/* DMA operations */ -struct dma_ops { - - struct dma_chan_data *(*channel_get)(struct dma *dma, - unsigned int req_channel); - void (*channel_put)(struct dma_chan_data *channel); - - int (*start)(struct dma_chan_data *channel); - int (*stop)(struct dma_chan_data *channel); - int (*stop_delayed)(struct dma_chan_data *channel); - int (*copy)(struct dma_chan_data *channel, int bytes, uint32_t flags); - int (*pause)(struct dma_chan_data *channel); - int (*release)(struct dma_chan_data *channel); - int (*status)(struct dma_chan_data *channel, - struct dma_chan_status *status, uint8_t direction); - - int (*set_config)(struct dma_chan_data *channel, - struct dma_sg_config *config); - - int (*probe)(struct dma *dma); - int (*remove)(struct dma *dma); - - int (*get_data_size)(struct dma_chan_data *channel, uint32_t *avail, - uint32_t *free); - - int (*get_attribute)(struct dma *dma, uint32_t type, uint32_t *value); - - int (*interrupt)(struct dma_chan_data *channel, enum 
dma_irq_cmd cmd); -}; - -/* DMA platform data */ -struct dma_plat_data { - uint32_t id; - uint32_t dir; /* bitmask of supported copy directions */ - uint32_t caps; /* bitmask of supported capabilities */ - uint32_t devs; /* bitmask of supported devs */ - uint32_t base; - uint32_t channels; - int irq; - const char *irq_name; - uint32_t chan_size; - const void *drv_plat_data; -}; - -struct dma { - struct dma_plat_data plat_data; - struct k_spinlock lock; /**< locking mechanism */ - int sref; /**< simple ref counter, guarded by lock */ - const struct dma_ops *ops; - atomic_t num_channels_busy; /* number of busy channels */ - struct dma_chan_data *chan; /* channels array */ - void *priv_data; -}; - -struct dma_chan_data { - struct dma *dma; - - uint32_t status; - uint32_t direction; - uint32_t desc_count; - uint32_t index; - uint32_t core; - uint64_t period; /* DMA channel's transfer period in us */ - /* true if this DMA channel is the scheduling source */ - bool is_scheduling_source; - - /* device specific data set by the device that requests the DMA channel */ - void *dev_data; - - void *priv_data; -}; - -struct dma_info { - struct dma *dma_array; - size_t num_dmas; -}; - -struct audio_stream; -typedef int (*dma_process_func)(const struct audio_stream *source, - uint32_t ioffset, struct audio_stream *sink, - uint32_t ooffset, uint32_t source_samples, uint32_t chmap); - -/** - * \brief API to initialize a platform DMA controllers. - * - * \param[in] sof Pointer to firmware main context. - */ -int dmac_init(struct sof *sof); - -/** - * \brief API to request a platform DMAC. - * - * Users can request DMAC based on dev type, copy direction, capabilities - * and access privilege. - * For exclusive access, ret DMAC with no channels draining. - * For shared access, ret DMAC with the least number of channels draining. - */ -struct dma *dma_get(uint32_t dir, uint32_t caps, uint32_t dev, uint32_t flags); - -/** - * \brief API to release a platform DMAC. - * - * @param[in] dma DMAC to relese. - */ -void dma_put(struct dma *dma); - -#define dma_set_drvdata(dma, data) \ - (dma->priv_data = data) -#define dma_get_drvdata(dma) \ - dma->priv_data -#define dma_base(dma) \ - dma->plat_data.base -#define dma_irq(dma) \ - dma->plat_data.irq -#define dma_irq_name(dma) \ - dma->plat_data.irq_name -#define dma_chan_size(dma) \ - dma->plat_data.chan_size -#define dma_chan_base(dma, chan) \ - (dma->plat_data.base + chan * dma->plat_data.chan_size) -#define dma_chan_get_data(chan) \ - ((chan)->priv_data) -#define dma_chan_set_data(chan, data) \ - ((chan)->priv_data = data) - -/* DMA API - * Programming flow is :- - * - * 1) dma_channel_get() - * 2) notifier_register() - * 3) dma_set_config() - * 4) dma_start() - * ... DMA now running ... 
- * 5) dma_stop() - * 6) dma_stop_delayed() - * 7) dma_channel_put() - */ - -static inline struct dma_chan_data *dma_channel_get_legacy(struct dma *dma, - int req_channel) -{ - if (!dma || !dma->ops || !dma->ops->channel_get) - return NULL; - - struct dma_chan_data *chan = dma->ops->channel_get(dma, req_channel); - - return chan; -} - -static inline void dma_channel_put_legacy(struct dma_chan_data *channel) -{ - channel->dma->ops->channel_put(channel); -} - -static inline int dma_start_legacy(struct dma_chan_data *channel) -{ - return channel->dma->ops->start(channel); -} - -static inline int dma_stop_legacy(struct dma_chan_data *channel) -{ - if (channel->dma->ops->stop) - return channel->dma->ops->stop(channel); - - return 0; -} - -static inline int dma_stop_delayed_legacy(struct dma_chan_data *channel) -{ - if (channel->dma->ops->stop_delayed) - return channel->dma->ops->stop_delayed(channel); - - return 0; -} - -/** \defgroup sof_dma_copy_func static int dma_copy (struct dma_chan_data * channel, int bytes, uint32_t flags) - * - * This function is in a separate subgroup to solve a name clash with - * struct dma_copy {} - * @{ - */ -static inline int dma_copy_legacy(struct dma_chan_data *channel, int bytes, - uint32_t flags) -{ - return channel->dma->ops->copy(channel, bytes, flags); -} -/** @} */ - -static inline int dma_pause_legacy(struct dma_chan_data *channel) -{ - if (channel->dma->ops->pause) - return channel->dma->ops->pause(channel); - - return 0; -} - -static inline int dma_release_legacy(struct dma_chan_data *channel) -{ - if (channel->dma->ops->release) - return channel->dma->ops->release(channel); - - return 0; -} - -static inline int dma_status_legacy(struct dma_chan_data *channel, - struct dma_chan_status *status, uint8_t direction) -{ - return channel->dma->ops->status(channel, status, direction); -} - -static inline int dma_set_config_legacy(struct dma_chan_data *channel, - struct dma_sg_config *config) -{ - return channel->dma->ops->set_config(channel, config); -} - -static inline int dma_probe_legacy(struct dma *dma) -{ - return dma->ops->probe(dma); -} - -static inline int dma_remove_legacy(struct dma *dma) -{ - return dma->ops->remove(dma); -} - -static inline int dma_get_data_size_legacy(struct dma_chan_data *channel, - uint32_t *avail, uint32_t *free) -{ - return channel->dma->ops->get_data_size(channel, avail, free); -} - -static inline int dma_get_attribute_legacy(struct dma *dma, uint32_t type, - uint32_t *value) -{ - return dma->ops->get_attribute(dma, type, value); -} - -static inline int dma_interrupt_legacy(struct dma_chan_data *channel, - enum dma_irq_cmd cmd) -{ - return channel->dma->ops->interrupt(channel, cmd); -} - -/* DMA hardware register operations */ -static inline uint32_t dma_reg_read(struct dma *dma, uint32_t reg) -{ - return io_reg_read(dma_base(dma) + reg); -} - -static inline uint16_t dma_reg_read16(struct dma *dma, uint32_t reg) -{ - return io_reg_read16(dma_base(dma) + reg); -} - -static inline void dma_reg_write(struct dma *dma, uint32_t reg, uint32_t value) -{ - io_reg_write(dma_base(dma) + reg, value); -} - -static inline void dma_reg_write16(struct dma *dma, uint32_t reg, - uint16_t value) -{ - io_reg_write16(dma_base(dma) + reg, value); -} - -static inline void dma_reg_update_bits(struct dma *dma, uint32_t reg, - uint32_t mask, uint32_t value) -{ - io_reg_update_bits(dma_base(dma) + reg, mask, value); -} - -static inline uint32_t dma_chan_reg_read(struct dma_chan_data *channel, - uint32_t reg) -{ - return 
io_reg_read(dma_chan_base(channel->dma, channel->index) + reg); -} - -static inline uint16_t dma_chan_reg_read16(struct dma_chan_data *channel, - uint32_t reg) -{ - return io_reg_read16(dma_chan_base(channel->dma, channel->index) + reg); -} - -static inline void dma_chan_reg_write(struct dma_chan_data *channel, - uint32_t reg, uint32_t value) -{ - io_reg_write(dma_chan_base(channel->dma, channel->index) + reg, value); -} - -static inline void dma_chan_reg_write16(struct dma_chan_data *channel, - uint32_t reg, uint16_t value) -{ - io_reg_write16(dma_chan_base(channel->dma, channel->index) + reg, - value); -} - -static inline void dma_chan_reg_update_bits(struct dma_chan_data *channel, - uint32_t reg, uint32_t mask, - uint32_t value) -{ - io_reg_update_bits(dma_chan_base(channel->dma, channel->index) + reg, - mask, value); -} - -static inline void dma_chan_reg_update_bits16(struct dma_chan_data *channel, - uint32_t reg, uint16_t mask, - uint16_t value) -{ - io_reg_update_bits16(dma_chan_base(channel->dma, channel->index) + reg, - mask, value); -} - -static inline bool dma_is_scheduling_source(struct dma_chan_data *channel) -{ - return channel->is_scheduling_source; -} - -static inline void dma_sg_init(struct dma_sg_elem_array *ea) -{ - ea->count = 0; - ea->elems = NULL; -} - -int dma_sg_alloc(struct dma_sg_elem_array *ea, - enum mem_zone zone, - uint32_t direction, - uint32_t buffer_count, uint32_t buffer_bytes, - uintptr_t dma_buffer_addr, uintptr_t external_addr); - -void dma_sg_free(struct dma_sg_elem_array *ea); - -/** - * \brief Get the total size of SG buffer - * - * \param ea Array of SG elements. - * \return Size of the buffer. - */ -static inline uint32_t dma_sg_get_size(struct dma_sg_elem_array *ea) -{ - int i; - uint32_t size = 0; - - for (i = 0 ; i < ea->count; i++) - size += ea->elems[i].size; - - return size; -} - -/* copies data from DMA buffer using provided processing function */ -int dma_buffer_copy_from(struct comp_buffer *source, - struct comp_buffer *sink, - dma_process_func process, uint32_t source_bytes, uint32_t chmap); - -/* - * Used when copying stream audio into multiple sink buffers, one at a time using the provided - * conversion function. DMA buffer consume should be performed after the data has been copied - * to all sinks. 
- */ -int stream_copy_from_no_consume(struct comp_dev *dev, struct comp_buffer *source, - struct comp_buffer *sink, dma_process_func process, - uint32_t source_bytes, uint32_t chmap); - -/* copies data to DMA buffer using provided processing function */ -int dma_buffer_copy_to(struct comp_buffer *source, - struct comp_buffer *sink, - dma_process_func process, uint32_t sink_bytes, uint32_t chmap); - -/* generic DMA DSP <-> Host copier */ - -struct dma_copy { - struct dma_chan_data *chan; - struct dma *dmac; -}; - -/* init dma copy context */ -int dma_copy_new(struct dma_copy *dc); - -/* free dma copy context resources */ -static inline void dma_copy_free(struct dma_copy *dc) -{ - dma_channel_put_legacy(dc->chan); -} - -/* DMA copy data from host to DSP */ -int dma_copy_from_host(struct dma_copy *dc, struct dma_sg_config *host_sg, - int32_t host_offset, void *local_ptr, int32_t size); -int dma_copy_from_host_nowait(struct dma_copy *dc, - struct dma_sg_config *host_sg, - int32_t host_offset, void *local_ptr, - int32_t size); - -/* DMA copy data from DSP to host */ -int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg, - int32_t host_offset, void *local_ptr, int32_t size); -int dma_copy_to_host_nowait(struct dma_copy *dc, struct dma_sg_config *host_sg, - int32_t host_offset, void *local_ptr, int32_t size); - - -int dma_copy_set_stream_tag(struct dma_copy *dc, uint32_t stream_tag); - -static inline const struct dma_info *dma_info_get(void) -{ - return sof_get()->dma_info; -} - -/** @}*/ - -#endif /* __SOF_LIB_DMA_H__ */ diff --git a/xtos/include/sof/lib/io.h b/xtos/include/sof/lib/io.h deleted file mode 100644 index 0ab94b01e6dc..000000000000 --- a/xtos/include/sof/lib/io.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. 
- * - * Author: Liam Girdwood - */ - -#ifndef __XTOS_SOF_LIB_IO_H__ -#define __XTOS_SOF_LIB_IO_H__ - - -#include - -#if CONFIG_LIBRARY - -static inline uint32_t io_reg_read(uint32_t reg) { return 0; } -static inline void io_reg_write(uint32_t reg, uint32_t val) {} -static inline void io_reg_update_bits(uint32_t reg, uint32_t mask, - uint32_t value) {} -static inline uint16_t io_reg_read16(uint32_t reg) { return 0; } -static inline void io_reg_write16(uint32_t reg, uint16_t val) {} -static inline void io_reg_update_bits16(uint32_t reg, uint16_t mask, - uint16_t value) {} - -#else - -static inline uint32_t io_reg_read(uint32_t reg) -{ - return *((volatile uint32_t*)reg); -} - -static inline void io_reg_write(uint32_t reg, uint32_t val) -{ - *((volatile uint32_t*)reg) = val; -} - -static inline void io_reg_update_bits(uint32_t reg, uint32_t mask, - uint32_t value) -{ - io_reg_write(reg, (io_reg_read(reg) & (~mask)) | (value & mask)); -} - -static inline uint16_t io_reg_read16(uint32_t reg) -{ - return *((volatile uint16_t*)reg); -} - -static inline void io_reg_write16(uint32_t reg, uint16_t val) -{ - *((volatile uint16_t*)reg) = val; -} - -static inline uint64_t io_reg_read64(uint32_t reg) -{ - return (uint64_t)io_reg_read(reg) + - (((uint64_t)io_reg_read(reg + 4)) << 32); -} - -static inline void io_reg_write64(uint32_t reg, uint64_t val) -{ - *((volatile uint64_t*)reg) = val; -} - -static inline void io_reg_update_bits16(uint32_t reg, uint16_t mask, - uint16_t value) -{ - io_reg_write16(reg, (io_reg_read16(reg) & (~mask)) | (value & mask)); -} - -static inline uint8_t io_reg_read8(uint32_t reg) -{ - return *((volatile uint8_t*)reg); -} - -static inline void io_reg_write8(uint32_t reg, uint8_t val) -{ - *((volatile uint8_t*)reg) = val; -} - -static inline void io_reg_update_bits8(uint32_t reg, uint8_t mask, - uint8_t value) -{ - io_reg_write8(reg, (io_reg_read8(reg) & (~mask)) | (value & mask)); -} - -#endif - -#endif /* __XTOS_SOF_LIB_IO_H__ */ diff --git a/xtos/include/sof/lib/memory.h b/xtos/include/sof/lib/memory.h deleted file mode 100644 index 456049a3bddb..000000000000 --- a/xtos/include/sof/lib/memory.h +++ /dev/null @@ -1,24 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2019 Intel Corporation. All rights reserved. - * - * Author: Tomasz Lauda - */ - -#ifndef __SOF_LIB_MEMORY_H__ -#define __SOF_LIB_MEMORY_H__ - -#include - -#ifndef __cold -#define __cold -#endif - -#ifndef __cold_rodata -#define __cold_rodata -#endif - -#define assert_can_be_cold() do {} while (0) -#define dbg_path_hot_confirm() do {} while (0) - -#endif /* __SOF_LIB_MEMORY_H__ */ diff --git a/xtos/include/sof/lib/mm_heap.h b/xtos/include/sof/lib/mm_heap.h deleted file mode 100644 index 931a66ed0cea..000000000000 --- a/xtos/include/sof/lib/mm_heap.h +++ /dev/null @@ -1,117 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. 
- * - * Author: Liam Girdwood - * Keyon Jie - */ - -#ifndef __SOF_LIB_MM_HEAP_H__ -#define __SOF_LIB_MM_HEAP_H__ - -#ifdef __ZEPHYR__ -#error "Please use zephyr/include/sof/lib/mm_heap.h instead" -#endif - -#include -#include -#include -#include -#include -#include - -#include -#include - -struct dma_copy; -struct dma_sg_config; - -struct mm_info { - uint32_t used; - uint32_t free; -}; - -struct block_hdr { - uint16_t size; /* size in blocks for continuous allocation */ - uint16_t used; /* usage flags for page */ - void *unaligned_ptr; /* align ptr */ -} __packed; - -struct block_map { - uint16_t block_size; /* size of block in bytes */ - uint16_t count; /* number of blocks in map */ - uint16_t free_count; /* number of free blocks */ - uint16_t first_free; /* index of first free block */ - struct block_hdr *block; /* base block header */ - uint32_t base; /* base address of space */ -}; - -#define BLOCK_DEF(sz, cnt, hdr) \ - {.block_size = sz, .count = cnt, .free_count = cnt, .block = hdr, \ - .first_free = 0} - -struct mm_heap { - uint32_t blocks; - struct block_map *map; -#if CONFIG_LIBRARY - unsigned long heap; -#else - uint32_t heap; -#endif - uint32_t size; - uint32_t caps; - struct mm_info info; -}; - -/* heap block memory map */ -struct mm { - /* system heap - used during init cannot be freed */ - struct mm_heap system[PLATFORM_HEAP_SYSTEM]; - /* system runtime heap - used for runtime system components */ - struct mm_heap system_runtime[PLATFORM_HEAP_SYSTEM_RUNTIME]; -#if CONFIG_CORE_COUNT > 1 - /* object shared between different cores - used during init cannot be freed */ - struct mm_heap system_shared[PLATFORM_HEAP_SYSTEM_SHARED]; - /* object shared between different cores */ - struct mm_heap runtime_shared[PLATFORM_HEAP_RUNTIME_SHARED]; -#endif - /* general heap for components */ - struct mm_heap runtime[PLATFORM_HEAP_RUNTIME]; - /* general component buffer heap */ - struct mm_heap buffer[PLATFORM_HEAP_BUFFER]; - - struct mm_info total; - uint32_t heap_trace_updated; /* updates that can be presented */ - struct k_spinlock lock; /* all allocs and frees are atomic */ -}; - -/* Heap save/restore contents and context for PM D0/D3 events */ -uint32_t mm_pm_context_size(void); - -/* heap initialisation */ -void init_heap(struct sof *sof); - -/* frees entire heap (supported for secondary core system heap atm) */ -void free_heap(enum mem_zone zone); - -/* status */ -void heap_trace_all(int force); -void heap_trace(struct mm_heap *heap, int size); - -#if CONFIG_DEBUG_MEMORY_USAGE_SCAN -/** Fetch runtime information about heap, like used and free memory space - * @param zone to check, see enum mem_zone. - * @param index heap index, eg. cpu core index for any *SYS* zone - * @param out output variable - * @return error code or zero - */ -int heap_info(enum mem_zone zone, int index, struct mm_info *out); -#endif - -/* retrieve memory map pointer */ -static inline struct mm *memmap_get(void) -{ - return sof_get()->memory_map; -} - -#endif /* __SOF_LIB_MM_HEAP_H__ */ diff --git a/xtos/include/sof/lib/pm_runtime.h b/xtos/include/sof/lib/pm_runtime.h deleted file mode 100644 index c0a24116e629..000000000000 --- a/xtos/include/sof/lib/pm_runtime.h +++ /dev/null @@ -1,176 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2018 Intel Corporation. All rights reserved. 
- * - * Author: Tomasz Lauda - * Janusz Jankowski - */ - -/** - * \file xtos/include/sof/lib/pm_runtime.h - * \brief Runtime power management header file - * \author Tomasz Lauda - */ - -#ifndef __SOF_LIB_PM_RUNTIME_H__ -#define __SOF_LIB_PM_RUNTIME_H__ - -#include -#include -#include -#include -#include -#include - -/** \addtogroup pm_runtime PM Runtime - * PM runtime specification. - * @{ - */ - -/* PM runtime flags */ - -#define RPM_ASYNC 0x01 /**< Request is asynchronous */ - -/** \brief Runtime power management context */ -enum pm_runtime_context { - PM_RUNTIME_HOST_DMA_L1 = 0, /**< Host DMA L1 */ - SSP_CLK, /**< SSP Clock */ - SSP_POW, /**< SSP Power */ - DMIC_CLK, /**< DMIC Clock */ - DMIC_POW, /**< DMIC Power */ - DW_DMAC_CLK, /**< DW DMAC Clock */ - CORE_MEMORY_POW, /**< Core Memory power */ - CORE_HP_CLK, /**< High Performance Clock*/ - PM_RUNTIME_DSP /**< DSP */ -}; - -/** \brief Runtime power management data. */ -struct pm_runtime_data { - struct k_spinlock lock; /**< lock mechanism */ - void *platform_data; /**< platform specific data */ -#if CONFIG_DSP_RESIDENCY_COUNTERS - struct r_counters_data *r_counters; /**< diagnostic DSP residency counters */ -#endif -}; - -#if CONFIG_DSP_RESIDENCY_COUNTERS -/** - * \brief DSP residency counters - * R0, R1, R2 are DSP residency counters which can be used differently - * based on platform implementation. - * In general R0 is the highest power consumption state while R2 is - * the lowest power consumption state. See platform specific pm_runtime.h - * for the platform HW specific mapping. - */ -enum dsp_r_state { - r0_r_state = 0, - r1_r_state, - r2_r_state -}; - -/** \brief Diagnostic DSP residency counters data */ -struct r_counters_data { - enum dsp_r_state cur_r_state; /**< current dsp_r_state */ - uint64_t ts; /**< dsp_r_state timestamp */ -}; -#endif - -/** - * \brief Initializes runtime power management. - */ -void pm_runtime_init(struct sof *sof); - -/** - * \brief Retrieves power management resource (async). - * - * \param[in] context Type of power management context. - * \param[in] index Index of the device. - */ -void pm_runtime_get(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Retrieves power management resource. - * - * \param[in] context Type of power management context. - * \param[in] index Index of the device. - */ -void pm_runtime_get_sync(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Releases power management resource (async). - * - * \param[in] context Type of power management context. - * \param[in] index Index of the device. - */ -void pm_runtime_put(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Releases power management resource. - * - * \param[in] context Type of power management context. - * \param[in] index Index of the device. - */ -void pm_runtime_put_sync(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Enables power management operations for the resource. - * - * \param[in] context Type of power management context. - * \param[in] index Index of the device. - */ -void pm_runtime_enable(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Disables power management operations for the resource. - * - * \param[in] context Type of power management context. - * \param[in] index Index of the device. - */ -void pm_runtime_disable(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Reports state of the power managed resource. - * - * @param context Type of power management context. 
- * @param index Index of the resource. - * - * @return true if the resource is active or pm disabled, false otherwise. - */ -bool pm_runtime_is_active(enum pm_runtime_context context, uint32_t index); - -/** - * \brief Retrieves pointer to runtime power management data. - * - * @return Runtime power management data pointer. - */ -static inline struct pm_runtime_data *pm_runtime_data_get(void) -{ - return sof_get()->prd; -} - -#if CONFIG_DSP_RESIDENCY_COUNTERS -/** - * \brief Initializes DSP residency counters. - * - * \param[in] context Type of power management context. - */ -void init_dsp_r_state(enum dsp_r_state); - -/** - * \brief Reports DSP residency state. - * - * \param[in] new state - */ -void report_dsp_r_state(enum dsp_r_state); - -/** - * \brief Retrieves current DSP residency state. - * - * @return active DSP residency state - */ -enum dsp_r_state get_dsp_r_state(void); -#endif - -/** @}*/ - -#endif /* __SOF_LIB_PM_RUNTIME_H__ */ diff --git a/xtos/include/sof/trace/preproc-private-dec.h b/xtos/include/sof/trace/preproc-private-dec.h deleted file mode 100644 index f36df9d38449..000000000000 --- a/xtos/include/sof/trace/preproc-private-dec.h +++ /dev/null @@ -1,98 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Michal Jerzy Wierzbicki - */ - -/* THIS FILE SHOULD NOT BE INCLUDED DIRECTLY */ - -#ifdef __SOF_TRACE_PREPROC_PRIVATE_H__ -/* Macros defined in this file are only helpers for the macros that are - * defined in header file containing "namespace" - * __SOF_TRACE_PREPROC_PRIVATE_H__ . - * This combination of #ifdef and #ifndef should sufficently narrow - * the "include-ability" of this dependent header file. - * If you wish to use macros from this file directly, be *V E R Y* careful! 
- * HIC SUNT DRACONES - */ -#ifndef __SOF_TRACE_PREPROC_PRIVATE_DEC_H__ -#define __SOF_TRACE_PREPROC_PRIVATE_DEC_H__ - -/* The only sane way I found to decrement values in cpreproc */ -/* for instance META_DEC(3) will be tokenized to DEC_3 - * and then expanded again to 2 - */ -#define _META_DEC_0 0 // notice how we deal with underflow -#define _META_DEC_1 0 -#define _META_DEC_2 1 -#define _META_DEC_3 2 -#define _META_DEC_4 3 -#define _META_DEC_5 4 -#define _META_DEC_6 5 -#define _META_DEC_7 6 -#define _META_DEC_8 7 -#define _META_DEC_9 8 -#define _META_DEC_10 9 -#define _META_DEC_11 10 -#define _META_DEC_12 11 -#define _META_DEC_13 12 -#define _META_DEC_14 13 -#define _META_DEC_15 14 -#define _META_DEC_16 15 -#define _META_DEC_17 16 -#define _META_DEC_18 17 -#define _META_DEC_19 18 -#define _META_DEC_20 19 -#define _META_DEC_21 20 -#define _META_DEC_22 21 -#define _META_DEC_23 22 -#define _META_DEC_24 23 -#define _META_DEC_25 24 -#define _META_DEC_26 25 -#define _META_DEC_27 26 -#define _META_DEC_28 27 -#define _META_DEC_29 28 -#define _META_DEC_30 29 -#define _META_DEC_31 30 -#define _META_DEC_32 31 -#define _META_DEC_33 32 -#define _META_DEC_34 33 -#define _META_DEC_35 34 -#define _META_DEC_36 35 -#define _META_DEC_37 36 -#define _META_DEC_38 37 -#define _META_DEC_39 38 -#define _META_DEC_40 39 -#define _META_DEC_41 40 -#define _META_DEC_42 41 -#define _META_DEC_43 42 -#define _META_DEC_44 43 -#define _META_DEC_45 44 -#define _META_DEC_46 45 -#define _META_DEC_47 46 -#define _META_DEC_48 47 -#define _META_DEC_49 48 -#define _META_DEC_50 49 -#define _META_DEC_51 50 -#define _META_DEC_52 51 -#define _META_DEC_53 52 -#define _META_DEC_54 53 -#define _META_DEC_55 54 -#define _META_DEC_56 55 -#define _META_DEC_57 56 -#define _META_DEC_58 57 -#define _META_DEC_59 58 -#define _META_DEC_60 59 -#define _META_DEC_61 60 -#define _META_DEC_62 61 -#define _META_DEC_63 62 -#define _META_DEC_64 63 - -#endif /* __SOF_TRACE_PREPROC_PRIVATE_DEC_H__ */ -#else - #error \ - Illegal use of header file: \ - can only be included from context of \ - __INCLUDE_MACRO_METAPROGRAMMING_PRIVATE__ -#endif /* __SOF_TRACE_PREPROC_PRIVATE_H__ */ diff --git a/xtos/include/sof/trace/preproc-private-inc.h b/xtos/include/sof/trace/preproc-private-inc.h deleted file mode 100644 index d19205c584cb..000000000000 --- a/xtos/include/sof/trace/preproc-private-inc.h +++ /dev/null @@ -1,98 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Michal Jerzy Wierzbicki - */ - -/* THIS FILE SHOULD NOT BE INCLUDED DIRECTLY */ - -#ifdef __SOF_TRACE_PREPROC_PRIVATE_H__ -/* Macros defined in this file are only helpers for the macros that are - * defined in header file containing "namespace" - * __SOF_TRACE_PREPROC_PRIVATE_H__ . - * This combination of #ifdef and #ifndef should sufficently narrow - * the "include-ability" of this dependent header file. - * If you wish to use macros from this file directly, be *V E R Y* careful! 
- * HIC SUNT DRACONES - */ -#ifndef __SOF_TRACE_PREPROC_PRIVATE_INC_H__ -#define __SOF_TRACE_PREPROC_PRIVATE_INC_H__ - -/* The only sane way I found to increment values in cpreproc */ -/* for instance META_INC(3) will be tokenized to INC_3 - * and then expanded again to 4 - */ -#define _META_INC_0 1 -#define _META_INC_1 2 -#define _META_INC_2 3 -#define _META_INC_3 4 -#define _META_INC_4 5 -#define _META_INC_5 6 -#define _META_INC_6 7 -#define _META_INC_7 8 -#define _META_INC_8 9 -#define _META_INC_9 10 -#define _META_INC_10 11 -#define _META_INC_11 12 -#define _META_INC_12 13 -#define _META_INC_13 14 -#define _META_INC_14 15 -#define _META_INC_15 16 -#define _META_INC_16 17 -#define _META_INC_17 18 -#define _META_INC_18 19 -#define _META_INC_19 20 -#define _META_INC_20 21 -#define _META_INC_21 22 -#define _META_INC_22 23 -#define _META_INC_23 24 -#define _META_INC_24 25 -#define _META_INC_25 26 -#define _META_INC_26 27 -#define _META_INC_27 28 -#define _META_INC_28 29 -#define _META_INC_29 30 -#define _META_INC_30 31 -#define _META_INC_31 32 -#define _META_INC_32 33 -#define _META_INC_33 34 -#define _META_INC_34 35 -#define _META_INC_35 36 -#define _META_INC_36 37 -#define _META_INC_37 38 -#define _META_INC_38 39 -#define _META_INC_39 40 -#define _META_INC_40 41 -#define _META_INC_41 42 -#define _META_INC_42 43 -#define _META_INC_43 44 -#define _META_INC_44 45 -#define _META_INC_45 46 -#define _META_INC_46 47 -#define _META_INC_47 48 -#define _META_INC_48 49 -#define _META_INC_49 50 -#define _META_INC_50 51 -#define _META_INC_51 52 -#define _META_INC_52 53 -#define _META_INC_53 54 -#define _META_INC_54 55 -#define _META_INC_55 56 -#define _META_INC_56 57 -#define _META_INC_57 58 -#define _META_INC_58 59 -#define _META_INC_59 60 -#define _META_INC_60 61 -#define _META_INC_61 62 -#define _META_INC_62 63 -#define _META_INC_63 64 -#define _META_INC_64 64 // notice how we deal with overflow - -#endif /* __SOF_TRACE_PREPROC_PRIVATE_INC_H__ */ -#else - #error \ - Illegal use of header file: \ - can only be included from context of \ - __INCLUDE_MACRO_METAPROGRAMMING_PRIVATE__ -#endif /* __SOF_TRACE_PREPROC_PRIVATE_H__ */ diff --git a/xtos/include/sof/trace/preproc-private.h b/xtos/include/sof/trace/preproc-private.h deleted file mode 100644 index 37202b5a2044..000000000000 --- a/xtos/include/sof/trace/preproc-private.h +++ /dev/null @@ -1,199 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Michal Jerzy Wierzbicki - */ - -/* THIS FILE SHOULD NOT BE INCLUDED DIRECTLY */ - -#ifdef __SOF_TRACE_PREPROC_H__ -/* Macros defined in this file are only helpers for the macros that are - * defined in header file containing "namespace" - * __SOF_TRACE_PREPROC_H__ . - * This combination of #ifdef and #ifndef should sufficently narrow - * the "include-ability" of this dependent header file. - * If you wish to use macros from this file directly, be *V E R Y* careful! - * HIC SUNT DRACONES - */ -#ifndef __SOF_TRACE_PREPROC_PRIVATE_H__ -#define __SOF_TRACE_PREPROC_PRIVATE_H__ - -/* Include - * #define _META_DEC_0 0 - * #define _META_DEC_1 1 - * #define _META_DEC_2 1 - * #define _META_DEC_3 2 - * ... - * #define _META_DEC_N N-1 - */ -#include -/* Include - * #define _META_INC_0 1 - * #define _META_INC_1 2 - * ... 
- * #define _META_INC_N-1 N - * #define _META_INC_N N - */ -#include - -/* count number of var args - during preprocesing - * works for predefined number of args - * META_COUNT_VARAGS_BEFORE_COMPILE(A,B,C,D) evaluates to 4 - */ -#define _META_PP_NARG_BEFORE_COMPILE_(...) \ - _META_PP_ARG_N(__VA_ARGS__) - -#define _META_PP_ARG_N(\ - _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, \ - _11, _12, _13, _14, _15, _16, _17, _18, _19, _20, \ - _21, _22, _23, _24, _25, _26, _27, _28, _29, _30, \ - _31, _32, _33, _34, _35, _36, _37, _38, _39, _40, \ - _41, _42, _43, _44, _45, _46, _47, _48, _49, _50, \ - _51, _52, _53, _54, _55, _56, _57, _58, _59, _60, \ - _61, _62, _63, N, ...) N - -#define _META_PP_RSEQ_N() \ - 63, 62, 61, 60, \ - 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, \ - 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, \ - 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, \ - 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, \ - 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, \ - 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 - -/* treat x as string while forcing x expansion beforehand */ -#define _META_QUOTE(x) #x - -/* concat x and y while forcing x and y expansion beforehand */ -#define _META_CONCAT_BASE(x, y) x##y - -/* discard first x-1 args in vararg and return the xth arg */ -#define _META_GET_ARG_1(arg1, ...) arg1 -#define _META_GET_ARG_2(arg1, arg2, ...) arg2 -/* TODO: GET_ARG version for arbitrary x>2 should be possible using - * META_RECURSE(META_REPEAT - */ - -/* _META_IS_PROBE(...) evaluates to 0 when __VA_ARGS__ is single token - * _META_IS_PROBE(PROBE()) evaulates to 1, because it is equivalent to - * _META_GET_ARG_2(~, 1, 0) - * ~ is no special value, it is just a meaningless placeholder, - * it could be something else if that thing would also have no meaning - * but be a valid C - */ -#define _META_IS_PROBE(...) _META_GET_ARG_2(__VA_ARGS__, 0) -#define _META_PROBE() ~, 1 - -/* _META_NOT_0 evaluates to '~, 1' - * _META_NOT_1 evaluates to '_META_NOT_1' (because it is not a macro) - * _META_IS_PROBE(_META_NOT_0) evaluates to 1, because it is equivalent to - * _META_GET_ARG_2(~, 1, 0) - * _META_IS_PROBE(_NOT_1) evaluates to 0, because it is equivalent to - * _META_GET_ARG_2(_NOT_1, 0) - * - * notice, that any x!=0 would also result in 0 - * e.x. META_NOT(123) evaluates to 0 - */ -#define _META_NOT_0 _META_PROBE() - -/* indirection forces condition to be "cast" to 0 1 - * then for 0 discard first (), and for 1 discard second () - * so META_IF_ELSE(0)(a)(b) expands to b, - * and META_IF_ELSE(1)(a)(b) expands to a - */ -#define _META_IF_ELSE(condition) META_CONCAT(_META_IF_, condition) - -#define _META_IF_1(...) __VA_ARGS__ _META_IF_1_ELSE -#define _META_IF_0(...) _META_IF_0_ELSE - -#define _META_IF_1_ELSE(...) -#define _META_IF_0_ELSE(...) __VA_ARGS__ - -#define _META_IIF(condition) META_CONCAT(_META_IIF_, condition) -#define _META_IIF_0(x, ...) __VA_ARGS__ -#define _META_IIF_1(x, ...) x - -/* primitive recursion */ -#define _META_REQRS_8(...) _META_REQRS_4( _META_REQRS_4 (__VA_ARGS__)) -#define _META_REQRS_4(...) _META_REQRS_2( _META_REQRS_2 (__VA_ARGS__)) -#define _META_REQRS_2(...) _META_REQRS_1( _META_REQRS_1 (__VA_ARGS__)) -#define _META_REQRS_1(...) __VA_ARGS__ - -/* Delay macro m expansion depth times - * IT IS CRUCIAL FOR NO #define _META_EMPTY macro to exist!!! - * _META_DEFER_N(depth) will work for any depth valid in META_REPEAT - * (which is confined only by META_DEC). - * _META_DEFER_N will NOT work inside META_REPEAT, because - * _META_DEFER_N uses META_REPEAT as seen below. 
- * In order for META_REPEAT to work (which also requires DEFER functionality) - * a duplicate, implicit _META_DEFER_2(m) has to be defined. - * It is because how the c preprocesor works. - */ -#define _META_EMPTY() - -/* Special, implicit defer implementation for META_REPEAT to work */ -#define _META_DEFER_2(m) m _META_EMPTY _META_EMPTY () () - -/* map every group of arg_count arguments onto function m - * i.e. arg_count=2;m=ADD;args=1,2,3,4,5,6,7... - * results in ADD(1,2) ADD(3,4) ADD(5,6) and so on - * MAP##N must exist for arg_count == N to work - */ -#define _META_MAP() META_MAP - -/* implements MAP(1, m, ...) */ -#define _META_MAP_1(m, arg1, ...)\ - m(arg1)\ - _META_DEFER_2(_META_MAP_BODY_TMP)()(1, m, __VA_ARGS__) - -/* implements MAP(2, m, ...) */ -#define _META_MAP_2(m, arg1, arg2, ...)\ - m(arg1, arg2)\ - _META_DEFER_2(_META_MAP_BODY_TMP)()(2, m, __VA_ARGS__) - -/* implements MAP(3, m, ...) */ -#define _META_MAP_3(m, arg1, arg2, arg3, ...)\ - m(arg1, arg2, arg3)\ - _META_DEFER_2(_META_MAP_BODY_TMP)()(3, m, __VA_ARGS__) - -/* used by macro MAP, don't use on its own */ -#define _META_MAP_BODY(arg_count, m, ...)\ - META_IF_ELSE(META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__))(\ - META_CONCAT(_META_MAP_, arg_count)(m, __VA_ARGS__) \ - )() -#define _META_MAP_BODY_TMP() _META_MAP_BODY - -/* map aggregator and every group of arg_count arguments onto function m - * i.e. aggr=x;arg_count=1;m=ADD;args=1,2,3,4,5,6,7... - * results in x = ... ADD(7,ADD(6,ADD(5,ADD(4,ADD(3,ADD(2,ADD(1,x))))))) ... - * MAP##N must exist for arg_count == N to work - */ -#define _META_MAP_AGGREGATE() META_MAP_AGGREGATE - -/* implements MAP_AGGREGATE(1, m, ...) */ -#define _META_MAP_AGGREGATE_1(m, aggr, arg1, ...)\ - _META_MAP_AGGREGATE_BODY(1, m, m(aggr, arg1), __VA_ARGS__) - -/* implements MAP_AGGREGATE(2, m, ...) */ -#define _META_MAP_AGGREGATE_2(m, aggr, arg1, arg2, ...)\ - _META_MAP_AGGREGATE_BODY(2, m, m(aggr, arg1, arg2), __VA_ARGS__) - -/* used by macro MAP_AGGREGATE, don't use on its own */ -#define _META_MAP_AGGREGATE_BODY(arg_count, m, aggr, ...)\ - META_IF_ELSE(META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__))(\ - _META_DEFER_2(_META_MAP_AGGREGATE)()\ - (arg_count, m, aggr, __VA_ARGS__)\ - )(aggr) - -/* UNUSED private macros */ -#define _META_VOID(x) (void)(x) -#define _META_VOID2(x, y) x; _META_VOID(y) - -#endif /* __SOF_TRACE_PREPROC_PRIVATE_H__ */ -#else - #error \ - Illegal use of header file: \ - can only be included from context of \ - __INCLUDE_MACRO_METAPROGRAMMING__ -#endif /* __SOF_TRACE_PREPROC_H__ */ diff --git a/xtos/include/sof/trace/preproc.h b/xtos/include/sof/trace/preproc.h deleted file mode 100644 index 88e4986ea240..000000000000 --- a/xtos/include/sof/trace/preproc.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * - * Author: Michal Jerzy Wierzbicki - */ - -#ifndef __SOF_TRACE_PREPROC_H__ -#define __SOF_TRACE_PREPROC_H__ - -/* Macros in this file are to be invoked directly from code. - * In order to work, they require a number of other macros that are - * defined in the header file specified below. - * Macros from the file specified below are not to meant to be used - * directly / independently. - * For more detailed commentary of innards of macros in this file, - * see file specified below. 
- */ -#include -#include - -/* count number of var args - during preprocesing - * works for predefined number of args - * META_COUNT_VARAGS_BEFORE_COMPILE(A,B,C,D) evaluates to 4 - */ -#define META_COUNT_VARAGS_BEFORE_COMPILE(...)\ - META_DEC(\ - _META_PP_NARG_BEFORE_COMPILE_(\ - _, ##__VA_ARGS__, _META_PP_RSEQ_N()\ - )\ - ) - -/* treat x as string while forcing x expansion beforehand */ -#define META_QUOTE(x) _META_QUOTE(x) - -/* concat x and y while forcing x and y expansion beforehand */ -#define META_CONCAT(x, y) _META_CONCAT_BASE(x, y) - -/* Only META_NOT(0) evaulates to 1 - * notice, that any x!=0 would also result in 0 - * e.x. META_NOT(123) evaluates to 0 - */ -#define META_NOT(x) _META_IS_PROBE(META_CONCAT(_META_NOT_, x)) -/* hacky way to convert tokens into 0 1*/ -#define META_BOOL(x) META_NOT(META_NOT(x)) - -/* META_IF_ELSE(X)(a)(b) expands to - * b for X == 0 - * a for X != 0 - */ -#define META_IF_ELSE(condition) _META_IF_ELSE(META_BOOL(condition)) - -/* same story with indirection as META_IF_ELSE */ -#define META_IF(condition) _META_IIF(META_BOOL(condition)) - -/* primitive recursion - * default depth is 8 - */ -#define META_RECURSE(...) _META_REQRS_8(__VA_ARGS__) - -/* The only sane way I found to increment values in cpreproc */ -#define META_INC(x) META_CONCAT(_META_INC_, x) - -/* The only sane way I found to decrement values in cpreproc */ -#define META_DEC(x) META_CONCAT(_META_DEC_, x) - -/* map every group of arg_count arguments onto function m - * i.e. arg_count=2;m=ADD;args=1,2,3,4,5,6,7... - * results in ADD(1,2) ADD(3,4) ADD(5,6) and so on - * MAP##N must exist for arg_count == N to work - */ -#define META_MAP(arg_count, m, ...) META_RECURSE(\ - _META_MAP_BODY(arg_count, m, __VA_ARGS__)) - -/* map aggregator and every group of arg_count arguments onto function m - * i.e. aggr=x;arg_count=1;m=ADD;args=1,2,3,4,5,6,7... - * results in x = ... ADD(7,ADD(6,ADD(5,ADD(4,ADD(3,ADD(2,ADD(1,x))))))) ... - * MAP##N must exist for arg_count == N to work - */ -#define META_MAP_AGGREGATE(arg_count, m, aggr, ...)\ - META_CONCAT(_META_MAP_AGGREGATE_, arg_count)(m, aggr, __VA_ARGS__) - -/* counteract compiler warning about unused variables */ -#define SOF_TRACE_UNUSED(arg1, ...) do { META_RECURSE( \ - META_MAP_AGGREGATE(1, _META_VOID2, _META_VOID(arg1), __VA_ARGS__)); \ - } while (0) - -#endif /* __SOF_TRACE_PREPROC_H__ */
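
Reviewer note (not part of the diff): the preprocessor helpers removed above are easier to review with a concrete expansion in mind. The sketch below is a hypothetical illustration only, written against xtos/include/sof/trace/preproc.h as it existed before this removal; the include path is inferred from the header's install location, and the identifiers NUM_STAGES, MAKE_HANDLER, DECLARE_FLAG and stage_status are invented for the example.

/* Minimal sketch assuming the pre-removal header <sof/trace/preproc.h>. */
#include <sof/trace/preproc.h>

/* Counts its arguments during preprocessing, so the result can size an
 * array: NUM_STAGES expands to the plain token 3 here.
 */
#define NUM_STAGES META_COUNT_VARAGS_BEFORE_COMPILE(init, run, done)
static int stage_status[NUM_STAGES];

/* Token pasting with forced expansion of both sides:
 * MAKE_HANDLER(irq) yields the identifier handle_irq.
 */
#define MAKE_HANDLER(name) META_CONCAT(handle_, name)
static void MAKE_HANDLER(irq)(void) { }

/* META_MAP(1, m, ...) applies m to every argument in turn, so the line
 * below expands to: static int flag_a; static int flag_b;
 */
#define DECLARE_FLAG(x) static int flag_##x;
META_MAP(1, DECLARE_FLAG, a, b)

/* Saturating counters from the removed private headers:
 * META_INC(3) expands to 4, META_DEC(3) expands to 2,
 * META_DEC(0) stays 0 and META_INC(64) stays 64.
 */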