diff --git a/analysis/benchmark_results.py b/analysis/benchmark_results.py
index 39ed41cdf..178723fdb 100644
--- a/analysis/benchmark_results.py
+++ b/analysis/benchmark_results.py
@@ -95,30 +95,30 @@ def _benchmark_snapshot_df(self):
@property
@functools.lru_cache()
def _benchmark_coverage_dict(self):
- """Covered regions of each fuzzer on this benchmark."""
+ """Covered branches of each fuzzer on this benchmark."""
return coverage_data_utils.get_benchmark_cov_dict(
self._coverage_dict, self.name)
@property
@functools.lru_cache()
def _benchmark_aggregated_coverage_df(self):
- """Aggregated covered regions of each fuzzer on this benchmark."""
+ """Aggregated covered branches of each fuzzer on this benchmark."""
return coverage_data_utils.get_benchmark_aggregated_cov_df(
self._coverage_dict, self.name)
@property
@functools.lru_cache()
- def _unique_region_dict(self):
- """Unique regions with the fuzzers that cover it."""
- return coverage_data_utils.get_unique_region_dict(
+ def _unique_branch_dict(self):
+        """Unique branches with the fuzzers that cover them."""
+ return coverage_data_utils.get_unique_branch_dict(
self._benchmark_coverage_dict)
@property
@functools.lru_cache()
- def unique_region_cov_df(self):
- """Fuzzers with the number of covered unique regions."""
- return coverage_data_utils.get_unique_region_cov_df(
- self._unique_region_dict, self.fuzzer_names)
+ def unique_branch_cov_df(self):
+ """Fuzzers with the number of covered unique branches."""
+ return coverage_data_utils.get_unique_branch_cov_df(
+ self._unique_branch_dict, self.fuzzer_names)
@property
def fuzzers_with_not_enough_samples(self):
@@ -344,12 +344,12 @@ def _generic_violin_plot(self, filename, bugs=False):
@property
def violin_plot(self):
- """Region coverage violin plot."""
+ """Branch coverage violin plot."""
return self._generic_violin_plot('violin.svg')
@property
def bug_violin_plot(self):
- """Region coverage violin plot."""
+ """Branch coverage violin plot."""
return self._generic_violin_plot('bug_violin.svg', bugs=True)
def _generic_box_plot(self, filename, bugs=False):
@@ -362,7 +362,7 @@ def _generic_box_plot(self, filename, bugs=False):
@property
def box_plot(self):
- """Region coverage boxplot."""
+ """Branch coverage boxplot."""
return self._generic_box_plot('boxplot.svg')
@property
@@ -399,19 +399,19 @@ def better_than_plot(self):
@property
def unique_coverage_ranking_plot(self):
"""Ranking plot for unique coverage."""
- plot_filename = self._prefix_with_benchmark('ranking_unique_region.svg')
- unique_region_cov_df_combined = self.unique_region_cov_df.merge(
+ plot_filename = self._prefix_with_benchmark('ranking_unique_branch.svg')
+ unique_branch_cov_df_combined = self.unique_branch_cov_df.merge(
self._benchmark_aggregated_coverage_df, on='fuzzer')
self._plotter.write_unique_coverage_ranking_plot(
- unique_region_cov_df_combined, self._get_full_path(plot_filename))
+ unique_branch_cov_df_combined, self._get_full_path(plot_filename))
return plot_filename
@property
@functools.lru_cache()
def pairwise_unique_coverage_table(self):
"""Pairwise unique coverage table for each pair of fuzzers."""
- fuzzers = self.unique_region_cov_df.sort_values(
- by='unique_regions_covered', ascending=False).fuzzer
+ fuzzers = self.unique_branch_cov_df.sort_values(
+ by='unique_branches_covered', ascending=False).fuzzer
return coverage_data_utils.get_pairwise_unique_coverage_table(
self._benchmark_coverage_dict, fuzzers)
diff --git a/analysis/coverage_data_utils.py b/analysis/coverage_data_utils.py
index b575409e1..90cb22d7c 100644
--- a/analysis/coverage_data_utils.py
+++ b/analysis/coverage_data_utils.py
@@ -74,96 +74,96 @@ def get_coverage_report_filestore_path(fuzzer: str, benchmark: str,
fuzzer, 'index.html')
-def get_covered_regions_dict(experiment_df: pd.DataFrame) -> Dict:
+def get_covered_branches_dict(experiment_df: pd.DataFrame) -> Dict:
"""Combines json files for different fuzzer-benchmark pair in
- |experiment_df| and returns a dictionary of the covered regions."""
+ |experiment_df| and returns a dictionary of the covered branches."""
fuzzers_and_benchmarks = set(
zip(experiment_df.fuzzer, experiment_df.benchmark))
arguments = [(fuzzer, benchmark,
get_experiment_filestore_path_for_fuzzer_benchmark(
fuzzer, benchmark, experiment_df))
for fuzzer, benchmark in fuzzers_and_benchmarks]
- result = itertools.starmap(get_fuzzer_benchmark_covered_regions_and_key,
+ result = itertools.starmap(get_fuzzer_benchmark_covered_branches_and_key,
arguments)
return dict(result)
-def get_fuzzer_benchmark_covered_regions_filestore_path(
+def get_fuzzer_benchmark_covered_branches_filestore_path(
fuzzer: str, benchmark: str, exp_filestore_path: str) -> str:
- """Returns the path to the covered regions json file in the |filestore| for
+ """Returns the path to the covered branches json file in the |filestore| for
|fuzzer| and |benchmark|."""
return posixpath.join(exp_filestore_path, 'coverage', 'data', benchmark,
- fuzzer, 'covered_regions.json')
+ fuzzer, 'covered_branches.json')
-def get_fuzzer_covered_regions(fuzzer: str, benchmark: str, filestore: str):
- """Returns the covered regions dict for |fuzzer| from the json file in the
+def get_fuzzer_covered_branches(fuzzer: str, benchmark: str, filestore: str):
+ """Returns the covered branches dict for |fuzzer| from the json file in the
filestore."""
- src_file = get_fuzzer_benchmark_covered_regions_filestore_path(
+ src_file = get_fuzzer_benchmark_covered_branches_filestore_path(
fuzzer, benchmark, filestore)
with tempfile.NamedTemporaryFile() as dst_file:
if filestore_utils.cp(src_file, dst_file.name,
expect_zero=False).retcode:
- logger.warning('covered_regions.json file: %s could not be copied.',
- src_file)
+ logger.warning(
+ 'covered_branches.json file: %s could not be copied.', src_file)
return {}
with open(dst_file.name) as json_file:
return json.load(json_file)
-def get_fuzzer_benchmark_covered_regions_and_key(
+def get_fuzzer_benchmark_covered_branches_and_key(
fuzzer: str, benchmark: str, filestore: str) -> Tuple[str, Dict]:
"""Accepts |fuzzer|, |benchmark|, |filestore|.
- Returns a tuple containing the fuzzer benchmark key and the regions covered
+ Returns a tuple containing the fuzzer benchmark key and the branches covered
by the fuzzer on the benchmark."""
- fuzzer_benchmark_covered_regions = get_fuzzer_covered_regions(
+ fuzzer_benchmark_covered_branches = get_fuzzer_covered_branches(
fuzzer, benchmark, filestore)
key = fuzzer_and_benchmark_to_key(fuzzer, benchmark)
- return key, fuzzer_benchmark_covered_regions
+ return key, fuzzer_benchmark_covered_branches
-def get_unique_region_dict(benchmark_coverage_dict: Dict) -> Dict:
+def get_unique_branch_dict(benchmark_coverage_dict: Dict) -> Dict:
"""Returns a dictionary containing the covering fuzzers for each unique
- region, where the |threshold| defines which regions are unique."""
- region_dict = collections.defaultdict(list)
- unique_region_dict = {}
+    branch; a branch is unique when no more than one fuzzer covers it."""
+ branch_dict = collections.defaultdict(list)
+ unique_branch_dict = {}
threshold_count = 1
for fuzzer in benchmark_coverage_dict:
- for region in benchmark_coverage_dict[fuzzer]:
- region_dict[region].append(fuzzer)
- for region, fuzzers in region_dict.items():
+ for branch in benchmark_coverage_dict[fuzzer]:
+ branch_dict[branch].append(fuzzer)
+ for branch, fuzzers in branch_dict.items():
if len(fuzzers) <= threshold_count:
- unique_region_dict[region] = fuzzers
- return unique_region_dict
+ unique_branch_dict[branch] = fuzzers
+ return unique_branch_dict
-def get_unique_region_cov_df(unique_region_dict: Dict,
+def get_unique_branch_cov_df(unique_branch_dict: Dict,
fuzzer_names: List[str]) -> pd.DataFrame:
"""Returns a DataFrame where the two columns are fuzzers and the number of
- unique regions covered."""
+ unique branches covered."""
fuzzers = collections.defaultdict(int)
- for region in unique_region_dict:
- for fuzzer in unique_region_dict[region]:
+ for branch in unique_branch_dict:
+ for fuzzer in unique_branch_dict[branch]:
fuzzers[fuzzer] += 1
- dict_to_transform = {'fuzzer': [], 'unique_regions_covered': []}
+ dict_to_transform = {'fuzzer': [], 'unique_branches_covered': []}
for fuzzer in fuzzer_names:
covered_num = fuzzers[fuzzer]
dict_to_transform['fuzzer'].append(fuzzer)
- dict_to_transform['unique_regions_covered'].append(covered_num)
+ dict_to_transform['unique_branches_covered'].append(covered_num)
return pd.DataFrame(dict_to_transform)
def get_benchmark_cov_dict(coverage_dict, benchmark):
- """Returns a dictionary to store the covered regions of each fuzzer. Uses a
- set of tuples to store the covered regions."""
+ """Returns a dictionary to store the covered branches of each fuzzer. Uses a
+ set of tuples to store the covered branches."""
benchmark_cov_dict = {}
- for key, covered_regions in coverage_dict.items():
+    for key, covered_branches in coverage_dict.items():
        current_fuzzer, current_benchmark = key_to_fuzzer_and_benchmark(key)
        if current_benchmark == benchmark:
-            covered_regions_in_set = set()
-            for region in covered_regions:
-                covered_regions_in_set.add(tuple(region))
-            benchmark_cov_dict[current_fuzzer] = covered_regions_in_set
+            covered_branches_in_set = set()
+            for branch in covered_branches:
+                covered_branches_in_set.add(tuple(branch))
+            benchmark_cov_dict[current_fuzzer] = covered_branches_in_set
return benchmark_cov_dict
@@ -171,12 +171,12 @@ def get_benchmark_aggregated_cov_df(coverage_dict, benchmark):
"""Returns a dataframe where each row represents a fuzzer and its aggregated
coverage number."""
dict_to_transform = {'fuzzer': [], 'aggregated_edges_covered': []}
- for key, covered_regions in coverage_dict.items():
+ for key, covered_branches in coverage_dict.items():
current_fuzzer, current_benchmark = key_to_fuzzer_and_benchmark(key)
if current_benchmark == benchmark:
dict_to_transform['fuzzer'].append(current_fuzzer)
dict_to_transform['aggregated_edges_covered'].append(
- len(covered_regions))
+ len(covered_branches))
return pd.DataFrame(dict_to_transform)
@@ -186,7 +186,7 @@ def get_pairwise_unique_coverage_table(benchmark_coverage_dict, fuzzers):
The pairwise unique coverage table is a square matrix where each
row and column represents a fuzzer, and each cell contains a number
- showing the regions covered by the fuzzer of the column but not by
+ showing the branches covered by the fuzzer of the column but not by
the fuzzer of the row."""
pairwise_unique_coverage_values = []
@@ -204,16 +204,16 @@ def get_pairwise_unique_coverage_table(benchmark_coverage_dict, fuzzers):
columns=fuzzers)
-def get_unique_covered_percentage(fuzzer_row_covered_regions,
- fuzzer_col_covered_regions):
- """Returns the number of regions covered by the fuzzer of the column but not
- by the fuzzer of the row."""
+def get_unique_covered_percentage(fuzzer_row_covered_branches,
+ fuzzer_col_covered_branches):
+ """Returns the number of branches covered by the fuzzer of the
+ column but not by the fuzzer of the row."""
- unique_region_count = 0
- for region in fuzzer_col_covered_regions:
- if region not in fuzzer_row_covered_regions:
- unique_region_count += 1
- return unique_region_count
+ unique_branch_count = 0
+ for branch in fuzzer_col_covered_branches:
+ if branch not in fuzzer_row_covered_branches:
+ unique_branch_count += 1
+ return unique_branch_count
def rank_by_average_normalized_score(benchmarks_unique_coverage_list):
diff --git a/analysis/experiment_results.py b/analysis/experiment_results.py
index e23303a73..b7be0ae6a 100644
--- a/analysis/experiment_results.py
+++ b/analysis/experiment_results.py
@@ -264,7 +264,7 @@ def rank_by_unique_coverage_average_normalized_score(self):
"""Rank fuzzers using average normalized score on unique code coverage
across benchmarks."""
benchmarks_unique_coverage_list = [
- benchmark.unique_region_cov_df for benchmark in self.benchmarks
+ benchmark.unique_branch_cov_df for benchmark in self.benchmarks
]
return coverage_data_utils.rank_by_average_normalized_score(
benchmarks_unique_coverage_list)
diff --git a/analysis/generate_report.py b/analysis/generate_report.py
index efbe4ef3f..06ad9ee01 100644
--- a/analysis/generate_report.py
+++ b/analysis/generate_report.py
@@ -226,7 +226,7 @@ def generate_report(experiment_names,
coverage_dict = {}
if coverage_report:
logger.info('Generating coverage report info.')
- coverage_dict = coverage_data_utils.get_covered_regions_dict(
+ coverage_dict = coverage_data_utils.get_covered_branches_dict(
experiment_df)
logger.info('Finished generating coverage report info.')
diff --git a/analysis/plotting.py b/analysis/plotting.py
index 088e8ce1e..14b27d573 100644
--- a/analysis/plotting.py
+++ b/analysis/plotting.py
@@ -181,7 +181,7 @@ def coverage_growth_plot(self,
loc='upper left',
frameon=False)
- axes.set(ylabel='Bug coverage' if bugs else 'Code region coverage')
+ axes.set(ylabel='Bug coverage' if bugs else 'Code branch coverage')
axes.set(xlabel='Time (hour:minute)')
if self._logscale or logscale:
@@ -260,7 +260,7 @@ def box_or_violin_plot(self,
sns.stripplot(**common_args, size=3, color="black", alpha=0.6)
axes.set_title(_formatted_title(benchmark_snapshot_df))
- ylabel = 'Reached {} coverage'.format('bug' if bugs else 'region')
+ ylabel = 'Reached {} coverage'.format('bug' if bugs else 'branch')
axes.set(ylabel=ylabel)
axes.set(xlabel='Fuzzer (highest median coverage on the left)')
axes.set_xticklabels(axes.get_xticklabels(),
@@ -307,7 +307,7 @@ def distribution_plot(self, benchmark_snapshot_df, axes=None, bugs=False):
axes.set_title(_formatted_title(benchmark_snapshot_df))
axes.legend(loc='upper right', frameon=False)
- axes.set(xlabel='Bug coverage' if bugs else 'Code region coverage')
+ axes.set(xlabel='Bug coverage' if bugs else 'Code branch coverage')
axes.set(ylabel='Density')
axes.set_xticklabels(axes.get_xticklabels(),
rotation=_DEFAULT_LABEL_ROTATION,
@@ -339,7 +339,7 @@ def ranking_plot(self, benchmark_snapshot_df, axes=None, bugs=False):
ax=axes)
axes.set_title(_formatted_title(benchmark_snapshot_df))
- ylabel = 'Reached {} coverage'.format('bug' if bugs else 'region')
+ ylabel = 'Reached {} coverage'.format('bug' if bugs else 'branch')
axes.set(ylabel=ylabel)
axes.set(xlabel='Fuzzer (highest median coverage on the left)')
axes.set_xticklabels(axes.get_xticklabels(),
@@ -491,17 +491,17 @@ def write_critical_difference_plot(self, average_ranks, num_of_benchmarks,
plt.close(fig)
def unique_coverage_ranking_plot(self,
- unique_region_cov_df_combined,
+ unique_branch_cov_df_combined,
axes=None):
"""Draws unique_coverage_ranking plot. The fuzzer labels will be in
the order of their coverage."""
- fuzzer_order = unique_region_cov_df_combined.sort_values(
- by='unique_regions_covered', ascending=False).fuzzer
+ fuzzer_order = unique_branch_cov_df_combined.sort_values(
+ by='unique_branches_covered', ascending=False).fuzzer
- axes = sns.barplot(y='unique_regions_covered',
+ axes = sns.barplot(y='unique_branches_covered',
x='fuzzer',
- data=unique_region_cov_df_combined,
+ data=unique_branch_cov_df_combined,
order=fuzzer_order,
palette=self._fuzzer_colors,
ax=axes)
@@ -517,7 +517,7 @@ def unique_coverage_ranking_plot(self,
sns.barplot(y='aggregated_edges_covered',
x='fuzzer',
- data=unique_region_cov_df_combined,
+ data=unique_branch_cov_df_combined,
order=fuzzer_order,
facecolor=(1, 1, 1, 0),
edgecolor='0.2',
@@ -531,11 +531,11 @@ def unique_coverage_ranking_plot(self,
sns.despine(ax=axes, trim=True)
- def write_unique_coverage_ranking_plot(self, unique_region_cov_df_combined,
+ def write_unique_coverage_ranking_plot(self, unique_branch_cov_df_combined,
image_path):
"""Writes ranking plot for unique coverage."""
self._write_plot_to_image(self.unique_coverage_ranking_plot,
- unique_region_cov_df_combined,
+ unique_branch_cov_df_combined,
image_path,
wide=True)
diff --git a/analysis/report_templates/default.html b/analysis/report_templates/default.html
index 105daaee7..2d9252549 100644
--- a/analysis/report_templates/default.html
+++ b/analysis/report_templates/default.html
@@ -385,12 +385,12 @@
Mann-Whitney U test
-
Ranking by unique code regions covered
+ Ranking by unique code branches covered
- Each bar shows the total number of code regions found by a given fuzzer.
- The colored area shows the number of unique code regions
- (i.e., regions that were not covered by any other fuzzers).
+ Each bar shows the total number of code branches found by a given fuzzer.
+ The colored area shows the number of unique code branches
+ (i.e., branches that were not covered by any other fuzzers).
@@ -399,7 +399,7 @@ Ranking by unique code regions covered
Pairwise unique code coverage
- Each cell represents the number of code regions covered by the fuzzer
+ Each cell represents the number of code branches covered by the fuzzer
of the column but not by the fuzzer of the row
diff --git a/analysis/test_coverage_data_utils.py b/analysis/test_coverage_data_utils.py
index 1a5127ea5..18b2b1c65 100644
--- a/analysis/test_coverage_data_utils.py
+++ b/analysis/test_coverage_data_utils.py
@@ -44,41 +44,41 @@ def create_coverage_data():
}
-def test_get_unique_region_dict():
- """Tests get_unique_region_dict() function."""
+def test_get_unique_branch_dict():
+ """Tests get_unique_branch_dict() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
- unique_region_dict = coverage_data_utils.get_unique_region_dict(
+ unique_branch_dict = coverage_data_utils.get_unique_branch_dict(
benchmark_coverage_dict)
expected_dict = {
(0, 0, 2, 2): ['afl'],
(0, 0, 2, 3): ['libfuzzer'],
(0, 0, 4, 4): ['libfuzzer']
}
- assert expected_dict == unique_region_dict
+ assert expected_dict == unique_branch_dict
-def test_get_unique_region_cov_df():
- """Tests get_unique_region_cov_df() function."""
+def test_get_unique_branch_cov_df():
+ """Tests get_unique_branch_cov_df() function."""
coverage_dict = create_coverage_data()
benchmark_coverage_dict = coverage_data_utils.get_benchmark_cov_dict(
coverage_dict, 'libpng-1.2.56')
- unique_region_dict = coverage_data_utils.get_unique_region_dict(
+ unique_branch_dict = coverage_data_utils.get_unique_branch_dict(
benchmark_coverage_dict)
fuzzer_names = ['afl', 'libfuzzer']
- unique_region_df = coverage_data_utils.get_unique_region_cov_df(
- unique_region_dict, fuzzer_names)
- unique_region_df = unique_region_df.sort_values(by=['fuzzer']).reset_index(
+ unique_branch_df = coverage_data_utils.get_unique_branch_cov_df(
+ unique_branch_dict, fuzzer_names)
+ unique_branch_df = unique_branch_df.sort_values(by=['fuzzer']).reset_index(
drop=True)
expected_df = pd.DataFrame([{
'fuzzer': 'afl',
- 'unique_regions_covered': 1
+ 'unique_branches_covered': 1
}, {
'fuzzer': 'libfuzzer',
- 'unique_regions_covered': 2
+ 'unique_branches_covered': 2
}])
- assert unique_region_df.equals(expected_df)
+ assert unique_branch_df.equals(expected_df)
def test_get_benchmark_cov_dict():
@@ -109,15 +109,15 @@ def test_get_pairwise_unique_coverage_table():
pd_test.assert_frame_equal(table, expected_table)
-def test_get_fuzzer_benchmark_covered_regions_filestore_path():
- """Tests that get_fuzzer_benchmark_covered_regions_filestore_path returns
+def test_get_fuzzer_benchmark_covered_branches_filestore_path():
+ """Tests that get_fuzzer_benchmark_covered_branches_filestore_path returns
the correct result."""
- assert (
- coverage_data_utils.get_fuzzer_benchmark_covered_regions_filestore_path(
- FUZZER, BENCHMARK,
- EXPERIMENT_FILESTORE_PATH) == ('gs://fuzzbench-data/myexperiment/'
- 'coverage/data/libpng-1.2.56/afl/'
- 'covered_regions.json'))
+ assert (coverage_data_utils.
+ get_fuzzer_benchmark_covered_branches_filestore_path(
+ FUZZER, BENCHMARK, EXPERIMENT_FILESTORE_PATH) == (
+ 'gs://fuzzbench-data/myexperiment/'
+ 'coverage/data/libpng-1.2.56/afl/'
+ 'covered_branches.json'))
def test_fuzzer_and_benchmark_to_key():
diff --git a/analysis/test_plotting.py b/analysis/test_plotting.py
index 947c2379f..0ec44a7cf 100644
--- a/analysis/test_plotting.py
+++ b/analysis/test_plotting.py
@@ -42,13 +42,13 @@ def test_unique_coverage_ranking_plot(tmp_path):
fuzzer_num = 22
fuzzers = [f'fuzzer-{i}' for i in range(fuzzer_num)]
- unique_regions = [10 * i for i in range(fuzzer_num)]
- total_regions = [1000] * fuzzer_num
+    unique_branches = [10 * i for i in range(fuzzer_num)]
+    total_branches = [1000] * fuzzer_num
    df = pd.DataFrame({
        'fuzzer': fuzzers,
-        'unique_regions_covered': unique_regions,
-        'aggregated_edges_covered': total_regions
+        'unique_branches_covered': unique_branches,
+ 'aggregated_edges_covered': total_branches
})
plotter = plotting.Plotter(fuzzers)
diff --git a/experiment/measurer/coverage_utils.py b/experiment/measurer/coverage_utils.py
index 0122b8454..7123ed4e2 100644
--- a/experiment/measurer/coverage_utils.py
+++ b/experiment/measurer/coverage_utils.py
@@ -48,15 +48,17 @@ def generate_coverage_reports(experiment_config: dict):
benchmarks = experiment_config['benchmarks']
fuzzers = experiment_config['fuzzers']
experiment = experiment_config['experiment']
+ region_coverage = experiment_config['region_coverage']
for benchmark in benchmarks:
for fuzzer in fuzzers:
- generate_coverage_report(experiment, benchmark, fuzzer)
+ generate_coverage_report(experiment, benchmark, fuzzer,
+ region_coverage)
logger.info('Finished generating coverage reports.')
-def generate_coverage_report(experiment, benchmark, fuzzer):
+def generate_coverage_report(experiment, benchmark, fuzzer, region_coverage):
"""Generates the coverage report for one pair of benchmark and fuzzer."""
logger.info(
('Generating coverage report for '
@@ -64,7 +66,8 @@ def generate_coverage_report(experiment, benchmark, fuzzer):
fuzzer=fuzzer))
try:
- coverage_reporter = CoverageReporter(experiment, fuzzer, benchmark)
+ coverage_reporter = CoverageReporter(experiment, fuzzer, benchmark,
+ region_coverage)
# Merges all the profdata files.
coverage_reporter.merge_profdata_files()
@@ -72,8 +75,8 @@ def generate_coverage_report(experiment, benchmark, fuzzer):
# Generate the coverage summary json file based on merged profdata file.
coverage_reporter.generate_coverage_summary_json()
- # Generate the coverage regions json file.
- coverage_reporter.generate_coverage_regions_json()
+ # Generate the coverage branches json file.
+ coverage_reporter.generate_coverage_branches_json()
# Generates the html reports using llvm-cov.
coverage_reporter.generate_coverage_report()
@@ -88,11 +91,12 @@ class CoverageReporter: # pylint: disable=too-many-instance-attributes
fuzzer and benchmark."""
# pylint: disable=too-many-arguments
- def __init__(self, experiment, fuzzer, benchmark):
+ def __init__(self, experiment, fuzzer, benchmark, region_coverage):
self.fuzzer = fuzzer
self.benchmark = benchmark
self.experiment = experiment
self.trial_ids = get_trial_ids(experiment, fuzzer, benchmark)
+ self.region_coverage = region_coverage
coverage_info_dir = get_coverage_info_dir()
self.report_dir = os.path.join(coverage_info_dir, 'reports', benchmark,
@@ -168,15 +172,19 @@ def generate_coverage_report(self):
dst_dir = exp_path.filestore(self.report_dir)
filestore_utils.cp(src_dir, dst_dir, recursive=True, parallel=True)
- def generate_coverage_regions_json(self):
+ def generate_coverage_branches_json(self):
"""Stores the coverage data in a json file."""
- covered_regions = extract_covered_regions_from_summary_json(
- self.merged_summary_json_file)
- coverage_json_src = os.path.join(self.data_dir, 'covered_regions.json')
+ if self.region_coverage:
+ edges_covered = extract_covered_regions_from_summary_json(
+ self.merged_summary_json_file)
+ else:
+ edges_covered = extract_covered_branches_from_summary_json(
+ self.merged_summary_json_file)
+ coverage_json_src = os.path.join(self.data_dir, 'covered_branches.json')
coverage_json_dst = exp_path.filestore(coverage_json_src)
filesystem.create_directory(self.data_dir)
with open(coverage_json_src, 'w') as file_handle:
- json.dump(covered_regions, file_handle)
+ json.dump(edges_covered, file_handle)
filestore_utils.cp(coverage_json_src,
coverage_json_dst,
expect_zero=False)
@@ -271,6 +279,36 @@ def generate_json_summary(coverage_binary,
return result
+def extract_covered_branches_from_summary_json(summary_json_file):
+ """Returns the covered branches given a coverage summary json file."""
+ covered_branches = []
+ try:
+ coverage_info = get_coverage_infomation(summary_json_file)
+ functions_data = coverage_info['data'][0]['functions']
+
+        # The items at index 4 and 5 tell whether the branch is evaluated
+        # to true or false respectively.
+ hit_true_index = 4
+ hit_false_index = 5
+ # The last number in the branch-list indicates what type of the
+ # region it is; 'branch_region' is represented by number 4.
+ type_index = -1
+ branch_region_type = 4
+        # The item at index 6 is the file number.
+ file_index = 6
+ for function_data in functions_data:
+ for branch in function_data['branches']:
+                if ((branch[hit_true_index] != 0 or
+                     branch[hit_false_index] != 0) and
+                        branch[type_index] == branch_region_type):
+ covered_branches.append(branch[:hit_true_index] +
+ branch[file_index:])
+
+ except Exception: # pylint: disable=broad-except
+ logger.error('Coverage summary json file defective or missing.')
+ return covered_branches
+
+
def extract_covered_regions_from_summary_json(summary_json_file):
"""Returns the covered regions given a coverage summary json file."""
covered_regions = []
diff --git a/experiment/measurer/measure_manager.py b/experiment/measurer/measure_manager.py
index 07e48dda3..b926c2515 100644
--- a/experiment/measurer/measure_manager.py
+++ b/experiment/measurer/measure_manager.py
@@ -76,7 +76,9 @@ def measure_main(experiment_config):
max_total_time = experiment_config['max_total_time']
measurers_cpus = experiment_config['measurers_cpus']
runners_cpus = experiment_config['runners_cpus']
- measure_loop(experiment, max_total_time, measurers_cpus, runners_cpus)
+ region_coverage = experiment_config['region_coverage']
+ measure_loop(experiment, max_total_time, measurers_cpus, runners_cpus,
+ region_coverage)
# Clean up resources.
gc.collect()
@@ -97,7 +99,8 @@ def _process_init(cores_queue):
def measure_loop(experiment: str,
max_total_time: int,
measurers_cpus=None,
- runners_cpus=None):
+ runners_cpus=None,
+ region_coverage=False):
"""Continuously measure trials for |experiment|."""
logger.info('Start measure_loop.')
@@ -126,7 +129,8 @@ def measure_loop(experiment: str,
# races.
all_trials_ended = scheduler.all_trials_ended(experiment)
- if not measure_all_trials(experiment, max_total_time, pool, q):
+ if not measure_all_trials(experiment, max_total_time, pool, q,
+ region_coverage):
# We didn't measure any trials.
if all_trials_ended:
# There are no trials producing snapshots to measure.
@@ -141,7 +145,8 @@ def measure_loop(experiment: str,
logger.info('Finished measure loop.')
-def measure_all_trials(experiment: str, max_total_time: int, pool, q) -> bool: # pylint: disable=invalid-name
+def measure_all_trials(experiment: str, max_total_time: int, pool, q,
+ region_coverage) -> bool: # pylint: disable=invalid-name
"""Get coverage data (with coverage runs) for all active trials. Note that
this should not be called unless multiprocessing.set_start_method('spawn')
was called first. Otherwise it will use fork which breaks logging."""
@@ -158,7 +163,7 @@ def measure_all_trials(experiment: str, max_total_time: int, pool, q) -> bool:
return False
measure_trial_coverage_args = [
- (unmeasured_snapshot, max_cycle, q)
+ (unmeasured_snapshot, max_cycle, q, region_coverage)
for unmeasured_snapshot in unmeasured_snapshots
]
@@ -356,8 +361,9 @@ class SnapshotMeasurer(coverage_utils.TrialCoverage): # pylint: disable=too-man
UNIT_BLACKLIST = collections.defaultdict(set)
+ # pylint: disable=too-many-arguments
def __init__(self, fuzzer: str, benchmark: str, trial_num: int,
- trial_logger: logs.Logger):
+ trial_logger: logs.Logger, region_coverage: bool):
super().__init__(fuzzer, benchmark, trial_num)
self.logger = trial_logger
self.corpus_dir = os.path.join(self.measurement_dir, 'corpus')
@@ -387,6 +393,9 @@ def __init__(self, fuzzer: str, benchmark: str, trial_num: int,
self.cov_summary_file = os.path.join(self.report_dir,
'cov_summary.json')
+ # Use region coverage as coverage metric instead of branch (default)
+ self.region_coverage = region_coverage
+
def get_profraw_files(self):
"""Return generated profraw files."""
return [
@@ -436,11 +445,14 @@ def get_current_coverage(self) -> int:
try:
coverage_info = coverage_utils.get_coverage_infomation(
self.cov_summary_file)
- coverage_data = coverage_info["data"][0]
- summary_data = coverage_data["totals"]
- regions_coverage_data = summary_data["regions"]
- regions_covered = regions_coverage_data["covered"]
- return regions_covered
+ coverage_data = coverage_info['data'][0]
+ summary_data = coverage_data['totals']
+ if self.region_coverage:
+ code_coverage_data = summary_data['regions']
+ else:
+ code_coverage_data = summary_data['branches']
+ code_coverage = code_coverage_data['covered']
+ return code_coverage
except Exception: # pylint: disable=broad-except
self.logger.error(
'Coverage summary json file defective or missing.')
@@ -612,8 +624,8 @@ def get_fuzzer_stats(stats_filestore_path):
def measure_trial_coverage( # pylint: disable=invalid-name
- measure_req, max_cycle: int,
- q: multiprocessing.Queue) -> models.Snapshot:
+ measure_req, max_cycle: int, q: multiprocessing.Queue,
+ region_coverage) -> models.Snapshot:
"""Measure the coverage obtained by |trial_num| on |benchmark| using
|fuzzer|."""
initialize_logs()
@@ -624,7 +636,8 @@ def measure_trial_coverage( # pylint: disable=invalid-name
try:
snapshot = measure_snapshot_coverage(measure_req.fuzzer,
measure_req.benchmark,
- measure_req.trial_id, cycle)
+ measure_req.trial_id, cycle,
+ region_coverage)
if not snapshot:
break
q.put(snapshot)
@@ -640,8 +653,8 @@ def measure_trial_coverage( # pylint: disable=invalid-name
def measure_snapshot_coverage( # pylint: disable=too-many-locals
- fuzzer: str, benchmark: str, trial_num: int,
- cycle: int) -> models.Snapshot:
+ fuzzer: str, benchmark: str, trial_num: int, cycle: int,
+ region_coverage: bool) -> models.Snapshot:
"""Measure coverage of the snapshot for |cycle| for |trial_num| of |fuzzer|
and |benchmark|."""
snapshot_logger = logs.Logger('measurer',
@@ -652,18 +665,18 @@ def measure_snapshot_coverage( # pylint: disable=too-many-locals
'cycle': str(cycle),
})
snapshot_measurer = SnapshotMeasurer(fuzzer, benchmark, trial_num,
- snapshot_logger)
+ snapshot_logger, region_coverage)
measuring_start_time = time.time()
snapshot_logger.info('Measuring cycle: %d.', cycle)
this_time = experiment_utils.get_cycle_time(cycle)
if snapshot_measurer.is_cycle_unchanged(cycle):
snapshot_logger.info('Cycle: %d is unchanged.', cycle)
- regions_covered = snapshot_measurer.get_current_coverage()
+ branches_covered = snapshot_measurer.get_current_coverage()
fuzzer_stats_data = snapshot_measurer.get_fuzzer_stats(cycle)
return models.Snapshot(time=this_time,
trial_id=trial_num,
- edges_covered=regions_covered,
+ edges_covered=branches_covered,
fuzzer_stats=fuzzer_stats_data,
crashes=[])
@@ -697,11 +710,11 @@ def measure_snapshot_coverage( # pylint: disable=too-many-locals
crashes = snapshot_measurer.process_crashes(cycle)
# Get the coverage of the new corpus units.
- regions_covered = snapshot_measurer.get_current_coverage()
+ branches_covered = snapshot_measurer.get_current_coverage()
fuzzer_stats_data = snapshot_measurer.get_fuzzer_stats(cycle)
snapshot = models.Snapshot(time=this_time,
trial_id=trial_num,
- edges_covered=regions_covered,
+ edges_covered=branches_covered,
fuzzer_stats=fuzzer_stats_data,
crashes=crashes)
diff --git a/experiment/measurer/test_coverage_utils.py b/experiment/measurer/test_coverage_utils.py
index e2c42ab6d..91db86a61 100644
--- a/experiment/measurer/test_coverage_utils.py
+++ b/experiment/measurer/test_coverage_utils.py
@@ -24,11 +24,12 @@ def get_test_data_path(*subpaths):
return os.path.join(TEST_DATA_PATH, *subpaths)
-def test_extract_covered_regions_from_summary_json(fs):
- """Tests that extract_covered_regions_from_summary_json returns the covered
- regions from summary json file."""
+def test_extract_covered_branches_from_summary_json(fs):
+    """Tests that extract_covered_branches_from_summary_json returns the covered
+    branches from the summary json file."""
summary_json_file = get_test_data_path('cov_summary.json')
fs.add_real_file(summary_json_file, read_only=False)
- covered_regions = coverage_utils.extract_covered_regions_from_summary_json(
+ covered_branches = coverage_utils. \
+ extract_covered_branches_from_summary_json(
summary_json_file)
- assert len(covered_regions) == 15
+ assert len(covered_branches) == 9
diff --git a/experiment/measurer/test_data/cov_summary.json b/experiment/measurer/test_data/cov_summary.json
index 2350239ef..b31b619d2 100644
--- a/experiment/measurer/test_data/cov_summary.json
+++ b/experiment/measurer/test_data/cov_summary.json
@@ -1 +1 @@
-{"version":"2.0.0","type":"llvm.coverage.json.export","data":[{"files":[{"filename":"/home/test/fuzz_no_fuzzer.cc","segments":[[1,16,20,1,1],[1,17,20,1,1],[1,20,20,1,0],[1,24,2,1,1],[1,27,20,1,0],[1,28,0,0,0],[2,37,2,1,1],[3,24,22,1,1],[3,30,2,1,0],[3,32,20,1,1],[3,35,2,1,0],[3,36,20,1,0],[3,37,20,1,1],[3,39,20,1,1],[3,42,20,1,0],[3,48,2,1,0],[5,3,0,1,1],[6,2,0,0,0],[7,12,1,1,1],[11,3,0,1,1],[12,2,0,0,0]],"expansions":[{"source_region":[3,39,3,42,10,0,1,1],"target_regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]},{"source_region":[3,39,3,42,10,0,1,1],"target_regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]}],"summary":{"lines":{"count":11,"covered":9,"percent":81},"functions":{"count":2,"covered":2,"percent":100},"instantiations":{"count":3,"covered":3,"percent":100},"regions":{"count":10,"covered":8,"notcovered":2,"percent":80}}}],"functions":[{"name":"main","count":1,"regions":[[7,12,12,2,1,0,0,0],[11,3,12,2,0,0,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc"]},{"name":"_Z3fooIiEvT_","count":1,"regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]},{"name":"_Z3fooIfEvT_","count":1,"regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1
,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]}],"totals":{"lines":{"count":11,"covered":9,"percent":81},"functions":{"count":2,"covered":2,"percent":100},"instantiations":{"count":3,"covered":3,"percent":100},"regions":{"count":10,"covered":8,"notcovered":2,"percent":80}}}]}
\ No newline at end of file
+{"version":"2.0.0","type":"llvm.coverage.json.export","data":[{"files":[{"filename":"/home/test/fuzz_no_fuzzer.cc","segments":[[1,16,20,1,1],[1,17,20,1,1],[1,20,20,1,0],[1,24,2,1,1],[1,27,20,1,0],[1,28,0,0,0],[2,37,2,1,1],[3,24,22,1,1],[3,30,2,1,0],[3,32,20,1,1],[3,35,2,1,0],[3,36,20,1,0],[3,37,20,1,1],[3,39,20,1,1],[3,42,20,1,0],[3,48,2,1,0],[5,3,0,1,1],[6,2,0,0,0],[7,12,1,1,1],[11,3,0,1,1],[12,2,0,0,0]],"expansions":[{"source_region":[3,39,3,42,10,0,1,1],"target_regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]},{"source_region":[3,39,3,42,10,0,1,1],"target_regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]}],"summary":{"lines":{"count":11,"covered":9,"percent":81},"functions":{"count":2,"covered":2,"percent":100},"instantiations":{"count":3,"covered":3,"percent":100},"regions":{"count":10,"covered":8,"notcovered":2,"percent":80}}}],"functions":[{"branches":[[626,5,626,12,1,0,0,0,4],[626,16,626,24,0,0,0,0,4]],"name":"main","count":1,"regions":[[7,12,12,2,1,0,0,0],[11,3,12,2,0,0,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc"]},{"branches":[[1306,5,1306,31,0,1,0,0,4],[1309,52,1309,64,0,1,0,0,4],[1327,3,1327,21,0,1,0,0,4],[1328,3,1328,15,0,1,0,0,4],[1329,3,1329,17,1,0,0,0,4],[1341,5,1341,13,0,1,0,0,4]],"name":"_Z3fooIiEvT_","count":1,"regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/
home/test/fuzz_no_fuzzer.cc"]},{"branches":[[341,5,341,29,0,1,0,0,4],[341,33,341,71,0,0,0,0,4],[344,11,344,22,3,0,0,0,4]],"name":"_Z3fooIfEvT_","count":1,"regions":[[2,37,6,2,1,0,0,0],[3,24,3,30,11,0,0,0],[3,32,3,35,10,0,0,0],[3,36,3,37,10,0,0,3],[3,37,3,48,10,0,0,0],[3,39,3,42,10,0,1,1],[5,3,6,2,0,0,0,0],[1,16,1,28,10,1,0,0],[1,17,1,20,10,1,0,0],[1,24,1,27,1,1,0,0]],"filenames":["/home/test/fuzz_no_fuzzer.cc","/home/test/fuzz_no_fuzzer.cc"]}],"totals":{"lines":{"count":11,"covered":9,"percent":81},"functions":{"count":2,"covered":2,"percent":100},"instantiations":{"count":3,"covered":3,"percent":100},"regions":{"count":10,"covered":8,"notcovered":2,"percent":80},"branches":{"count":10,"covered":7,"notcovered":3,"percent":70}}}]}
\ No newline at end of file
diff --git a/experiment/measurer/test_measure_manager.py b/experiment/measurer/test_measure_manager.py
index f52bd3df4..fdc538d22 100644
--- a/experiment/measurer/test_measure_manager.py
+++ b/experiment/measurer/test_measure_manager.py
@@ -42,6 +42,7 @@
CYCLE = 1
SNAPSHOT_LOGGER = measure_manager.logger
+REGION_COVERAGE = False
# pylint: disable=unused-argument,invalid-name,redefined-outer-name,protected-access
@@ -59,34 +60,34 @@ def db_experiment(experiment_config, db):
def test_get_current_coverage(fs, experiment):
"""Tests that get_current_coverage reads the correct data from json file."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
json_cov_summary_file = get_test_data_path('cov_summary.json')
fs.add_real_file(json_cov_summary_file, read_only=False)
snapshot_measurer.cov_summary_file = json_cov_summary_file
- covered_regions = snapshot_measurer.get_current_coverage()
- assert covered_regions == 8
+ covered_branches = snapshot_measurer.get_current_coverage()
+ assert covered_branches == 7
def test_get_current_coverage_error(fs, experiment):
"""Tests that get_current_coverage returns None from a
defective json file."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
json_cov_summary_file = get_test_data_path('cov_summary_defective.json')
fs.add_real_file(json_cov_summary_file, read_only=False)
snapshot_measurer.cov_summary_file = json_cov_summary_file
- covered_regions = snapshot_measurer.get_current_coverage()
- assert not covered_regions
+ covered_branches = snapshot_measurer.get_current_coverage()
+ assert not covered_branches
def test_get_current_coverage_no_file(fs, experiment):
"""Tests that get_current_coverage returns None with no json file."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
json_cov_summary_file = get_test_data_path('cov_summary_not_exist.json')
snapshot_measurer.cov_summary_file = json_cov_summary_file
- covered_regions = snapshot_measurer.get_current_coverage()
- assert not covered_regions
+ covered_branches = snapshot_measurer.get_current_coverage()
+ assert not covered_branches
@mock.patch('common.new_process.execute')
@@ -94,7 +95,7 @@ def test_generate_profdata_create(mocked_execute, experiment, fs):
"""Tests that generate_profdata can run the correct command."""
mocked_execute.return_value = new_process.ProcessResult(0, '', False)
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
snapshot_measurer.profdata_file = '/work/reports/data.profdata'
snapshot_measurer.profraw_file_pattern = '/work/reports/data-%m.profraw'
profraw_file = '/work/reports/data-123.profraw'
@@ -116,7 +117,7 @@ def test_generate_profdata_merge(mocked_execute, experiment, fs):
"""Tests that generate_profdata can run correctly with existing profraw."""
mocked_execute.return_value = new_process.ProcessResult(0, '', False)
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
snapshot_measurer.profdata_file = '/work/reports/data.profdata'
snapshot_measurer.profraw_file_pattern = '/work/reports/data-%m.profraw'
profraw_file = '/work/reports/data-123.profraw'
@@ -144,7 +145,7 @@ def test_generate_summary(mocked_get_coverage_binary, mocked_execute,
mocked_get_coverage_binary.return_value = coverage_binary_path
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
snapshot_measurer.cov_summary_file = "/reports/cov_summary.txt"
snapshot_measurer.profdata_file = "/reports/data.profdata"
fs.create_dir('/reports')
@@ -176,9 +177,9 @@ def test_measure_trial_coverage(mocked_measure_snapshot_coverage, mocked_queue,
measure_request = measure_manager.SnapshotMeasureRequest(
FUZZER, BENCHMARK, TRIAL_NUM, min_cycle)
measure_manager.measure_trial_coverage(measure_request, max_cycle,
- mocked_queue())
+ mocked_queue(), False)
expected_calls = [
- mock.call(FUZZER, BENCHMARK, TRIAL_NUM, cycle)
+ mock.call(FUZZER, BENCHMARK, TRIAL_NUM, cycle, False)
for cycle in range(min_cycle, max_cycle + 1)
]
assert mocked_measure_snapshot_coverage.call_args_list == expected_calls
@@ -191,7 +192,7 @@ def test_measure_all_trials_not_ready(mocked_rsync, mocked_ls, experiment):
mocked_ls.return_value = new_process.ProcessResult(1, '', False)
assert measure_manager.measure_all_trials(
experiment_utils.get_experiment_name(), MAX_TOTAL_TIME,
- test_utils.MockPool(), queue.Queue())
+ test_utils.MockPool(), queue.Queue(), False)
assert not mocked_rsync.called
@@ -208,14 +209,14 @@ def test_measure_all_trials_no_more(mocked_directories_have_same_files,
mock_pool = test_utils.MockPool()
assert not measure_manager.measure_all_trials(
experiment_utils.get_experiment_name(), MAX_TOTAL_TIME, mock_pool,
- queue.Queue())
+ queue.Queue(), False)
def test_is_cycle_unchanged_doesnt_exist(experiment):
"""Test that is_cycle_unchanged can properly determine if a cycle is
unchanged or not when it needs to copy the file for the first time."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
this_cycle = 1
with test_utils.mock_popen_ctx_mgr(returncode=1):
assert not snapshot_measurer.is_cycle_unchanged(this_cycle)
@@ -227,7 +228,7 @@ def test_is_cycle_unchanged_first_copy(mocked_read, mocked_cp, experiment):
"""Test that is_cycle_unchanged can properly determine if a cycle is
unchanged or not when it needs to copy the file for the first time."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
this_cycle = 100
unchanged_cycles_file_contents = (
'\n'.join([str(num) for num in range(10)] + [str(this_cycle)]))
@@ -242,7 +243,7 @@ def test_is_cycle_unchanged_update(fs, experiment):
"""Test that is_cycle_unchanged can properly determine that a
cycle has changed when it has the file but needs to update it."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
this_cycle = 100
initial_unchanged_cycles_file_contents = (
@@ -266,7 +267,7 @@ def test_is_cycle_unchanged_skip_cp(mocked_cp, fs, experiment):
"""Check that is_cycle_unchanged doesn't call filestore_utils.cp
unnecessarily."""
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
this_cycle = 100
initial_unchanged_cycles_file_contents = (
'\n'.join([str(num) for num in range(10)] + [str(this_cycle + 1)]))
@@ -282,7 +283,7 @@ def test_is_cycle_unchanged_no_file(mocked_cp, fs, experiment):
unchanged-cycles file."""
# Make sure we log if there is no unchanged-cycles file.
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
mocked_cp.return_value = new_process.ProcessResult(1, '', False)
assert not snapshot_measurer.is_cycle_unchanged(0)
@@ -299,7 +300,7 @@ def test_run_cov_new_units(_, mocked_execute, fs, environ):
}
mocked_execute.return_value = new_process.ProcessResult(0, '', False)
snapshot_measurer = measure_manager.SnapshotMeasurer(
- FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
+ FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER, REGION_COVERAGE)
snapshot_measurer.initialize_measurement_dirs()
shared_units = ['shared1', 'shared2']
fs.create_file(snapshot_measurer.measured_files_path,
@@ -402,7 +403,8 @@ def test_measure_snapshot_coverage( # pylint: disable=too-many-locals
db_utils.add_all([trial])
snapshot_measurer = measure_manager.SnapshotMeasurer(
- trial.fuzzer, trial.benchmark, trial.id, SNAPSHOT_LOGGER)
+ trial.fuzzer, trial.benchmark, trial.id, SNAPSHOT_LOGGER,
+ REGION_COVERAGE)
# Set up the snapshot archive.
cycle = 1
@@ -418,7 +420,7 @@ def test_measure_snapshot_coverage( # pylint: disable=too-many-locals
# integration tests.
snapshot = measure_manager.measure_snapshot_coverage(
snapshot_measurer.fuzzer, snapshot_measurer.benchmark,
- snapshot_measurer.trial_num, cycle)
+ snapshot_measurer.trial_num, cycle, False)
assert snapshot
assert snapshot.time == cycle * experiment_utils.get_snapshot_seconds()
assert snapshot.edges_covered == 13178
diff --git a/experiment/run_experiment.py b/experiment/run_experiment.py
index cc721051a..4e7352cc5 100644
--- a/experiment/run_experiment.py
+++ b/experiment/run_experiment.py
@@ -240,6 +240,7 @@ def start_experiment( # pylint: disable=too-many-arguments
concurrent_builds=None,
measurers_cpus=None,
runners_cpus=None,
+ region_coverage=False,
custom_seed_corpus_dir=None):
"""Start a fuzzer benchmarking experiment."""
if not allow_uncommitted_changes:
@@ -269,6 +270,7 @@ def start_experiment( # pylint: disable=too-many-arguments
# 12GB is just the amount that KLEE needs, use this default to make KLEE
# experiments easier to run.
config['runner_memory'] = config.get('runner_memory', '12GB')
+ config['region_coverage'] = region_coverage
config['custom_seed_corpus_dir'] = custom_seed_corpus_dir
if config['custom_seed_corpus_dir']:
@@ -590,6 +592,12 @@ def main():
required=False,
default=False,
action='store_true')
+ parser.add_argument('-cr',
+ '--region-coverage',
+ help='Use region as coverage metric.',
+ required=False,
+ default=False,
+ action='store_true')
parser.add_argument(
'-o',
'--oss-fuzz-corpus',
@@ -646,6 +654,7 @@ def main():
concurrent_builds=concurrent_builds,
measurers_cpus=measurers_cpus,
runners_cpus=runners_cpus,
+ region_coverage=args.region_coverage,
custom_seed_corpus_dir=args.custom_seed_corpus_dir)
return 0