Skip to content
90 changes: 90 additions & 0 deletions tests/reframe/config/settings.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
# ReFrame site configuration for a fictional example system consisting of a
# CPU partition and a GPU partition, both scheduled by Slurm and launched
# with srun.
site_configuration = {
    "systems": [
        {
            "name": "Example_system",
            "descr": "This is just an example system",
            "modules_system": "tmod",
            "hostnames": ["login", "int"],
            "partitions": [
                # Plain CPU nodes: 24 cores each.
                {
                    "name": "cpu",
                    "scheduler": "slurm",
                    "launcher": "srun",
                    "access": ["-p cpu"],
                    "environs": ["builtin"],
                    "processor": {
                        "num_cpus": 24,
                    },
                    "descr": "normal CPU partition",
                },
                # GPU nodes: 24 cores and 2 GPUs each.
                {
                    "name": "gpu",
                    "descr": "GPU partition",
                    "scheduler": "slurm",
                    "access": ["-p gpu"],
                    "environs": ["builtin"],
                    "max_jobs": 100,
                    "launcher": "srun",
                    "processor": {
                        "num_cpus": 24,
                    },
                    "devices": [
                        {
                            "type": "gpu",
                            "num_devices": 2,
                        },
                    ],
                },
            ]
        },
    ],
    "environments": [
        {
            "name": "builtin",
            "cc": "cc",
            "cxx": "",
            "ftn": "",
        },
        {
            "name": "container",
            "modules": [],
        },
    ],
    "logging": [
        {
            "level": "debug",
            "handlers": [
                # Terse progress information on the terminal...
                {
                    "type": "stream",
                    "name": "stdout",
                    "level": "info",
                    "format": "%(message)s",
                },
                # ...and a full debug log in reframe.log, rewritten each run.
                {
                    "type": "file",
                    "name": "reframe.log",
                    "level": "debug",
                    "format": ("[%(asctime)s] %(levelname)s: "
                               "%(check_info)s: %(message)s"),
                    "append": False,
                },
            ],
            # Performance results accumulate per system/partition.
            "handlers_perflog": [
                {
                    "type": "filelog",
                    "prefix": "%(check_system)s/%(check_partition)s",
                    "level": "info",
                    "format": (
                        "%(check_job_completion_time)s|reframe %(version)s|"
                        "%(check_info)s|jobid=%(check_jobid)s|"
                        "%(check_perf_var)s=%(check_perf_value)s|"
                        "ref=%(check_perf_ref)s "
                        "(l=%(check_perf_lower_thres)s, "
                        "u=%(check_perf_upper_thres)s)|"
                        "%(check_perf_unit)s"
                    ),
                    "append": True,
                },
            ],
        }
    ],
}
94 changes: 94 additions & 0 deletions tests/reframe/config/settings_magic_castle.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,94 @@
# ReFrame site configuration for a Magic Castle cluster: one Slurm CPU
# partition with exclusive 36-core single-socket nodes, launched through
# mpirun. A GPU partition template is kept below (commented out) for
# clusters that provide GPU nodes.
site_configuration = {
    "systems": [
        {
            "name": "example_system",
            "descr": "This is just an example system",
            "modules_system": "lmod",
            "hostnames": ["login", "node"],
            "partitions": [
                {
                    "name": "cpu",
                    "scheduler": "slurm",
                    "launcher": "mpirun",
                    "access": ["-p cpubase_bycore_b1 --exclusive --mem=94515M"],
                    "environs": ["builtin"],
                    "max_jobs": 4,
                    "processor": {
                        "num_cpus": 36,
                        "num_sockets": 1,
                        "num_cpus_per_socket": 36,
                    },
                    "descr": "normal CPU partition",
                },
                # Template for a GPU partition — enable and adapt as needed:
                # {
                #     "name": "gpu",
                #     "descr": "GPU partition",
                #     "scheduler": "slurm",
                #     "access": ["-p gpu --gpus-per-node 4 --exclusive"],
                #     "environs": ["builtin"],
                #     "max_jobs": 10,
                #     "launcher": "srun",
                #     "processor": {
                #         "num_cpus": 72,
                #     },
                #     "devices": [
                #         {
                #             "type": "gpu",
                #             "num_devices": 4,
                #         },
                #     ],
                # },
            ]
        },
    ],
    "environments": [
        {
            "name": "builtin",
            "cc": "cc",
            "cxx": "",
            "ftn": "",
        },
    ],
    "logging": [
        {
            "level": "debug",
            "handlers": [
                # Terse progress information on the terminal...
                {
                    "type": "stream",
                    "name": "stdout",
                    "level": "info",
                    "format": "%(message)s",
                },
                # ...and a full debug log in reframe.log, rewritten each run.
                {
                    "type": "file",
                    "name": "reframe.log",
                    "level": "debug",
                    "format": ("[%(asctime)s] %(levelname)s: "
                               "%(check_info)s: %(message)s"),
                    "append": False,
                },
            ],
            # Performance results accumulate per system/partition.
            "handlers_perflog": [
                {
                    "type": "filelog",
                    "prefix": "%(check_system)s/%(check_partition)s",
                    "level": "info",
                    "format": (
                        "%(check_job_completion_time)s|reframe %(version)s|"
                        "%(check_info)s|jobid=%(check_jobid)s|"
                        "%(check_perf_var)s=%(check_perf_value)s|"
                        "ref=%(check_perf_ref)s "
                        "(l=%(check_perf_lower_thres)s, "
                        "u=%(check_perf_upper_thres)s)|"
                        "%(check_perf_unit)s"
                    ),
                    "append": True,
                },
            ],
        }
    ],
    "general": [
        {
            # Auto-detect processor topology on the remote compute nodes.
            "remote_detect": True,
        }
    ],
}
104 changes: 104 additions & 0 deletions tests/reframe/eessi-checks/applications/tensorflow2.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,104 @@
import os
import reframe as rfm
from reframe.utility import find_modules

from testlib.applications.tensorflow2 import TensorFlow2
import eessi_utils.hooks as hooks
import eessi_utils.utils as utils

@rfm.required_version('>=3.6.2')
@rfm.simple_test
class TensorFlow2_EESSI(TensorFlow2):
    '''EESSI TensorFlow 2 check, for pure TensorFlow. This test does not support running on multiple nodes'''

    # 'required' makes ReFrame fail early unless a hook assigns a value;
    # apply_module_info below does so for every generated test variant.
    modules = required # Make sure that our apply_module_info hook sets a value
    # One test variant per 'TensorFlow' module found in the module system;
    # every module is mapped onto the 'builtin' programming environment.
    module_info = parameter(find_modules('TensorFlow', environ_mapping={r'.*': 'builtin'}))
    # This test is singlenode and should be run in CI
    tags = {'singlenode', 'CI'}

    @run_after('init')
    def apply_module_info(self):
        '''Set valid_systems, valid_prog_environs and modules from module_info.'''
        hooks.apply_module_info(test = self, module_info = self.module_info)

    # Skip testing GPU-based modules on CPU-based nodes
    @run_after('setup')
    def skip_gpu_test_on_cpu_nodes(self):
        hooks.skip_gpu_test_on_cpu_nodes(self)

    # Skip testing with device == gpu on CPU based nodes
    @run_after('setup')
    def skip_device_gpu_on_cpu_nodes(self):
        # NOTE(review): 'device' is presumably a 'cpu'/'gpu' parameter
        # declared by the TensorFlow2 base test — confirm in testlib.
        self.skip_if(
            (self.device == 'gpu' and not utils.is_gpu_present(self)),
            "Skipping test variant where tf.device is GPU, since this partition contains non-GPU nodes"
        )

    # This test uses only OpenMP for parallelism, so simply run on all cores
    @run_after('setup')
    def set_num_tasks(self):
        '''Run one task per job, using every core of the node via OpenMP.'''
        self.num_tasks = 1
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = self.current_partition.processor.num_cpus
        self.omp_num_threads = self.num_cpus_per_task

    @run_before('run')
    def bind_to_none(self):
        '''Disable mpirun core binding so OpenMP threads may use all cores.'''
        hooks.bind_to_none(self)

@rfm.required_version('>=3.6.2')
@rfm.simple_test
class TensorFlow2_Horovod_EESSI(TensorFlow2):
    '''EESSI TensorFlow 2 check, with multiprocessing support through Horovod.
    This test will run TensorFlow2 using all modules with 'TensorFlow' in the module environment it can find.
    On GPU nodes, it will only run tests if the module names also contain 'cuda'.
    On CPU nodes, it will only run tests if a module name does NOT contain 'cuda'.
    Whether a node is CPU/GPU is determined based on if a device named 'gpu' is specified in the ReFrame settings file for the current partition.
    Number of tasks, tasks per node and cpus per task are set based on the number of GPUs and number of CPUs specified in the ReFrame config file for the current partition.
    When using multiple CPU nodes, the number of OMP_NUM_THREADS is set to the core count minus 1, to leave one dedicated thread for Horovod.
    '''

    # 'required' makes ReFrame fail early unless a hook assigns a value;
    # apply_module_info below does so for every generated test variant.
    modules = required # Make sure that our apply_module_info hook sets a value
    # (tag, node count) variants; the tag is attached in set_test_scale so a
    # scale can be selected on the command line with -t.
    scale = parameter([
        ('singlenode', 1),
        ('n_small', 2),
        ('n_medium', 8),
        ('n_large', 16)
    ])
    # One test variant per 'Horovod' module found in the module system,
    # mapped onto the 'builtin' programming environment.
    module_info = parameter(find_modules('Horovod', environ_mapping={r'.*': 'builtin'}))

    @run_after('init')
    def apply_module_info(self):
        '''Set valid_systems, valid_prog_environs and modules from module_info.'''
        hooks.apply_module_info(test = self, module_info = self.module_info)

    @run_after('init')
    def set_test_scale(self):
        '''Unpack the scale parameter into a selection tag and a node count.'''
        scale_variant, self.num_nodes = self.scale
        self.tags.add(scale_variant)

    # Skip testing GPU-based modules on CPU-based nodes
    @run_after('setup')
    def skip_gpu_test_on_cpu_nodes(self):
        hooks.skip_gpu_test_on_cpu_nodes(self)

    # Skip testing with device == gpu on CPU based nodes
    @run_after('setup')
    def skip_device_gpu_on_cpu_nodes(self):
        # NOTE(review): 'device' is presumably a 'cpu'/'gpu' parameter
        # declared by the TensorFlow2 base test — confirm in testlib.
        self.skip_if(
            (self.device == 'gpu' and not utils.is_gpu_present(self)),
            "Skipping test variant where tf.device is GPU, since this partition contains non-GPU nodes"
        )

    # Assign num_tasks, num_tasks_per_node and num_cpus_per_task automatically
    # based on current partition's num_cpus and gpus
    @run_after('setup')
    def set_num_tasks(self):
        hooks.auto_assign_num_tasks_hybrid(test = self, num_nodes = self.num_nodes)

    @run_after('setup')
    def set_omp_num_threads(self):
        '''Derive OMP_NUM_THREADS from the per-task core count.'''
        # For CPU runs on more than 4 cores, leave one thread idle for Horovod
        if self.device == 'cpu' and self.num_cpus_per_task > 4:
            self.omp_num_threads = self.num_cpus_per_task - 1
        else:
            self.omp_num_threads = self.num_cpus_per_task
        print("Set omp_num_threads to: %s" % self.omp_num_threads)

61 changes: 61 additions & 0 deletions tests/reframe/eessi_utils/hooks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import reframe as rfm
import eessi_utils.utils as utils
from typing import Tuple

def apply_module_info(test: rfm.RegressionTest, module_info: Tuple[str, str, str]):
    '''Configure a test from one (system, environment, module) tuple.

    module_info should hold one value of parameter(find_modules(...)); its
    three fields become the test's valid system, programming environment and
    module list, respectively.
    '''
    system_name, environ_name, module_name = module_info
    test.valid_systems = [system_name]
    test.valid_prog_environs = [environ_name]
    test.modules = [module_name]

def skip_cpu_test_on_gpu_nodes(test: rfm.RegressionTest) -> None:
    '''Skip test if GPUs are present, but no CUDA is required.

    Uses skip_if's message argument so the reason is recorded by ReFrame's
    own reporting, instead of the previous unconditional print() followed by
    skip_if(True).
    '''
    test.skip_if(
        utils.is_gpu_present(test) and not utils.is_cuda_required(test),
        "GPU is present on this partition, skipping CPU-based test"
    )

def skip_gpu_test_on_cpu_nodes(test: rfm.RegressionTest) -> None:
    '''Skip test if CUDA is required, but no GPU is present.

    Uses skip_if's message argument so the reason is recorded by ReFrame's
    own reporting, instead of the previous unconditional print() followed by
    skip_if(True).
    '''
    test.skip_if(
        utils.is_cuda_required(test) and not utils.is_gpu_present(test),
        "Test requires CUDA, but no GPU is present in this partition. Skipping test..."
    )

def auto_assign_num_tasks_MPI(test: rfm.RegressionTest, num_nodes: int) -> None:
    '''Automatically set num_tasks, num_tasks_per_node and num_cpus_per_task.

    Values are based on the current partition's num_cpus, number of GPUs and
    num_nodes. For GPU tests, one task per GPU is set and num_cpus_per_task
    is the ratio of CPU cores to GPUs. For CPU tests, one task per CPU core
    is set and num_cpus_per_task is 1. Total task count is num_nodes times
    the per-node task count. This behaviour is (usually) sensible for pure
    MPI tests.

    Note: mutates the test in place and returns None (the former
    '-> rfm.RegressionTest' annotation was wrong).
    '''
    if utils.is_cuda_required(test):
        test.num_tasks_per_node = utils.get_num_gpus(test)
        # Floor division: cores may not divide evenly over the GPUs, and we
        # must not rely on float division plus int() truncation.
        test.num_cpus_per_task = (
            test.current_partition.processor.num_cpus // test.num_tasks_per_node
        )
    else:
        # Pure MPI on CPU: one single-core task per core.
        test.num_tasks_per_node = test.current_partition.processor.num_cpus
        test.num_cpus_per_task = 1
    test.num_tasks = num_nodes * test.num_tasks_per_node

def auto_assign_num_tasks_hybrid(test: rfm.RegressionTest, num_nodes: int) -> None:
    '''Automatically set num_tasks, num_tasks_per_node and num_cpus_per_task.

    Values are based on the current partition's num_cpus, num_sockets, number
    of GPUs and num_nodes. For GPU tests, one task per GPU is set and
    num_cpus_per_task is the ratio of CPU cores to GPUs. For CPU tests, one
    task per CPU socket is set and num_cpus_per_task is #cores / #sockets.
    Total task count is num_nodes times the per-node task count. This
    behaviour is (usually) sensible for hybrid OpenMP-MPI tests; for sockets
    with very large core counts (where OpenMP cannot exploit sufficient
    parallelism) more than one task per socket may be desirable.

    Note: mutates the test in place and returns None (the former
    '-> rfm.RegressionTest' annotation was wrong).
    '''
    if utils.is_cuda_required(test):
        test.num_tasks_per_node = utils.get_num_gpus(test)
        # Floor division: cores may not divide evenly over the GPUs.
        test.num_cpus_per_task = (
            test.current_partition.processor.num_cpus // test.num_tasks_per_node
        )
    else:
        # NOTE: on AMD Zen2 nodes, hybrid programs might run faster with one
        # task per NUMA domain instead of per socket. If ReFrame ever exposes
        # NUMA-domain detection, improve this segment (and change
        # '--bind-to socket' to '--bind-to numa').
        test.num_tasks_per_node = test.current_partition.processor.num_sockets
        test.num_cpus_per_task = test.current_partition.processor.num_cpus_per_socket
    test.num_tasks = num_nodes * test.num_tasks_per_node

    # mpirun binds to core by default; bind to socket so OpenMP threads can
    # use all cores of the task's socket, or to none when a single task owns
    # the whole node.
    if test.current_partition.launcher_type.registered_name == 'mpirun':
        if test.num_tasks_per_node == 1:
            test.job.launcher.options.append(' --bind-to none')
        else:
            test.job.launcher.options.append(' --bind-to socket')

def bind_to_none(test: rfm.RegressionTest) -> None:
    '''Set --bind-to none in case the launcher is mpirun.

    Intended for single-task-per-node (e.g. pure OpenMP) runs; warns when
    more than one task per node is configured, since unbinding then likely
    indicates a mistake in the test implementation.

    Note: mutates the test in place and returns None (the former
    '-> rfm.RegressionTest' annotation was wrong).
    '''
    if test.current_partition.launcher_type.registered_name == 'mpirun':
        if test.num_tasks_per_node > 1:
            print("Warning: test is running with more than one task per node, but you are binding to 'none'. This is probably a mistake in the test implementation.")
        test.job.launcher.options.append(' --bind-to none')
35 changes: 35 additions & 0 deletions tests/reframe/eessi_utils/utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import re

import reframe as rfm


gpu_dev_name = 'gpu'

def _get_gpu_list(test: rfm.RegressionTest):
return [ dev.num_devices for dev in test.current_partition.devices if dev.device_type == gpu_dev_name ]

def get_num_gpus(test: rfm.RegressionTest) -> int:
    '''Return the number of GPUs per node for the current partition.

    Raises ValueError when the partition defines no device named
    gpu_dev_name (previously this case produced the misleading "multiple
    different devices" error) or defines more than one such device (the
    count would be ambiguous).
    '''
    gpu_list = _get_gpu_list(test)
    partition = test.current_partition.name
    if not gpu_list:
        raise ValueError(f"No device named '{gpu_dev_name}' exists for "
                         f"partition '{partition}'. "
                         f"Cannot determine number of GPUs available for the test. "
                         f"Please check the definition of partition '{partition}' "
                         f"in your ReFrame config file.")
    if len(gpu_list) > 1:
        # Multiple devices called 'gpu' in the current partition: we don't
        # know for which to return the device count.
        raise ValueError(f"Multiple different devices exist with the name "
                         f"'{gpu_dev_name}' for partition '{partition}'. "
                         f"Cannot determine number of GPUs available for the test. "
                         f"Please check the definition of partition '{partition}' "
                         f"in your ReFrame config file.")
    return gpu_list[0]

def is_gpu_present(test: rfm.RegressionTest) -> bool:
    '''Return True when the current partition defines at least one GPU device.'''
    gpu_devices = _get_gpu_list(test)
    return bool(gpu_devices)

def is_cuda_required(test: rfm.RegressionTest) -> bool:
    '''Check if CUDA seems to be required by the current module set.

    A module is considered CUDA-enabled when its name contains 'cuda'
    (case-insensitive).
    '''
    # any() short-circuits on the first CUDA-enabled module; the original
    # flag-setting loop always scanned the full module list.
    return any(re.search("(?i)cuda", module) for module in test.modules)
Loading