From 039dea9129209fe8fb878bded578f86112188ba8 Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Wed, 7 Feb 2024 21:14:18 -0800 Subject: [PATCH 001/108] HDDS-10229. Fixes for Grafana dashboards (#6120) --- .../dashboards/Ozone - JVM Metrics.json | 357 +++++------------- .../dashboards/Ozone - ListKey Metrics.json | 85 +---- .../dashboards/Ozone - Object Metrics.json | 1 - 3 files changed, 118 insertions(+), 325 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json index 339a7b005e11..73c6722176ec 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - JVM Metrics.json @@ -1,43 +1,9 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "9.1.5" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], "annotations": { "list": [ { "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, + "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -74,8 +40,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -152,8 +117,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "code", "expr": "increase(jvm_metrics_gc_time_millis{processname=\"OzoneManager\"}[1m]) / 60", @@ -167,8 +131,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -244,8 +207,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"OzoneManager\"}", @@ -269,8 +231,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -346,11 +307,10 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9875\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9875|.+9876\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -375,8 +335,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -451,8 +410,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"OzoneManager\"}", @@ -466,8 +424,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -526,7 +483,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn02.cdip.cisco.local:9875" ], "prefix": "All except:", "readOnly": true @@ -567,8 +523,7 @@ "targets": [ { "datasource": 
{ - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"OzoneManager\"}", @@ -582,8 +537,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -658,8 +612,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"OzoneManager\"}", @@ -687,8 +640,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -748,7 +700,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn02.cdip.cisco.local:9875" ], "prefix": "All except:", "readOnly": true @@ -789,8 +740,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"OzoneManager\"}", @@ -804,8 +754,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -881,8 +830,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"OzoneManager\"}", @@ -910,8 +858,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -971,7 +918,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn02.cdip.cisco.local:9889" ], "prefix": "All except:", "readOnly": true @@ -1012,8 +958,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "exemplar": false, @@ -1030,8 +975,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1106,8 +1050,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"Recon\"}", @@ -1135,8 +1078,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1212,11 +1154,10 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9889\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9889|.+9888\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -1241,8 +1182,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1317,8 +1257,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"Recon\"}", @@ -1332,8 +1271,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1408,8 +1346,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"Recon\"}", @@ -1423,8 
+1360,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1499,8 +1435,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"Recon\"}", @@ -1528,8 +1463,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1589,7 +1523,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn02.cdip.cisco.local:9889" ], "prefix": "All except:", "readOnly": true @@ -1630,8 +1563,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"Recon\"}", @@ -1645,8 +1577,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1706,7 +1637,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn02.cdip.cisco.local:9889" ], "prefix": "All except:", "readOnly": true @@ -1747,8 +1677,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"Recon\"}", @@ -1776,8 +1705,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1837,7 +1765,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -1878,8 +1805,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "increase(jvm_metrics_gc_time_millis{processname=\"HddsDatanode\"}[1m]) / 60", @@ -1893,8 +1819,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -1969,8 +1894,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"HddsDatanode\"}", @@ -1998,8 +1922,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2075,11 +1998,10 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9883\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9883|.+9882\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -2104,8 +2026,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2180,8 +2101,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"HddsDatanode\"}", @@ -2195,8 +2115,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2255,7 +2174,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -2296,8 +2214,7 @@ "targets": [ { "datasource": 
{ - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"HddsDatanode\"}", @@ -2311,8 +2228,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2387,8 +2303,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"HddsDatanode\"}", @@ -2416,8 +2331,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2477,7 +2391,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -2518,8 +2431,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"HddsDatanode\"}", @@ -2533,8 +2445,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2594,7 +2505,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -2635,8 +2545,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"HddsDatanode\"}", @@ -2664,8 +2573,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2741,8 +2649,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "increase(jvm_metrics_gc_time_millis{processname=\"StorageContainerManager\"}[1m]) / 60", @@ -2756,8 +2663,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2832,8 +2738,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"StorageContainerManager\"}", @@ -2861,8 +2766,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -2938,11 +2842,10 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9877\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9877|.+9876\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -2967,8 +2870,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3043,8 +2945,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"StorageContainerManager\"}", @@ -3058,8 +2959,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3134,8 +3034,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"StorageContainerManager\"}", @@ -3149,8 +3048,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3225,8 +3123,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"StorageContainerManager\"}", @@ -3254,8 +3151,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3315,7 +3211,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn01.cdip.cisco.local:9877" ], "prefix": "All except:", "readOnly": true @@ -3356,8 +3251,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"StorageContainerManager\"}", @@ -3371,8 +3265,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3432,7 +3325,6 @@ "options": { "mode": "exclude", "names": [ - "rhelnn01.cdip.cisco.local:9877" ], "prefix": "All except:", "readOnly": true @@ -3473,8 +3365,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"StorageContainerManager\"}", @@ -3502,8 +3393,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3563,7 +3453,6 @@ "options": { "mode": "exclude", "names": [ - "rhel04.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -3604,8 +3493,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "exemplar": false, @@ -3617,8 +3505,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "hide": false, "refId": "B" @@ -3629,8 +3516,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3705,8 +3591,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count{processname=\"S3Gateway\"}", @@ -3734,8 +3619,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3795,7 +3679,6 @@ "options": { "mode": "exclude", "names": [ - "rhel02.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -3836,11 +3719,10 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "code", - "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9879\"}", + "expr": "jvm_metrics_cpu_jvm_load{instance=~\".+9879|.+9878\"}", "legendFormat": "{{instance}}", "range": true, "refId": "A" @@ -3865,8 +3747,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -3941,8 +3822,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, 
"editorMode": "builder", "expr": "jvm_metrics_threads_runnable{processname=\"S3Gateway\"}", @@ -3956,8 +3836,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4032,8 +3911,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked{processname=\"S3Gateway\"}", @@ -4047,8 +3925,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4123,8 +4000,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting{processname=\"S3Gateway\"}", @@ -4152,8 +4028,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4213,7 +4088,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -4254,8 +4128,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m{processname=\"S3Gateway\"}", @@ -4269,8 +4142,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4330,7 +4202,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9879" ], "prefix": "All except:", "readOnly": true @@ -4371,8 +4242,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m{processname=\"S3Gateway\"}", @@ -4400,8 +4270,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4477,8 +4346,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "increase(jvm_metrics_gc_time_millis[1m]) / 60", @@ -4492,8 +4360,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4568,8 +4435,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_gc_count", @@ -4597,8 +4463,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4674,8 +4539,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_cpu_jvm_load", @@ -4703,8 +4567,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4779,8 +4642,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_runnable", @@ -4794,8 +4656,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4870,8 +4731,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": 
"${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_blocked", @@ -4885,8 +4745,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -4961,8 +4820,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_threads_waiting", @@ -4990,8 +4848,7 @@ "panels": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -5051,7 +4908,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -5092,8 +4948,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_heap_used_m", @@ -5107,8 +4962,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -5168,7 +5022,6 @@ "options": { "mode": "exclude", "names": [ - "rhel01.cdip.cisco.local:9883" ], "prefix": "All except:", "readOnly": true @@ -5209,8 +5062,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "type": "prometheus" }, "editorMode": "builder", "expr": "jvm_metrics_mem_non_heap_used_m", @@ -5243,7 +5095,6 @@ "timepicker": {}, "timezone": "", "title": "JVM Metrics", - "uid": "DtIgEEmSz", "version": 16, "weekStart": "" -} \ No newline at end of file +} diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json index a0771b509075..c5db476b69a2 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json @@ -1,51 +1,9 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS-1", - "label": "Prometheus-1", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - }, - { - "name": "DS_PROMETHEUS-0", - "label": "Prometheus-0", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.1.1" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], "annotations": { "list": [ { "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, + "datasource": "-- Grafana --", "enable": true, "hide": true, "iconColor": "rgba(0, 211, 255, 1)", @@ -76,8 +34,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -138,7 +95,6 @@ "options": { "mode": "exclude", "names": [ - "mus-test2-1.mus-test2.root.hwx.site" ], "prefix": "All except:", "readOnly": true @@ -179,8 +135,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", @@ -212,8 +167,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, 
"fieldConfig": { "defaults": { @@ -290,8 +244,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", @@ -306,8 +259,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", @@ -327,8 +279,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-0}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -406,8 +357,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-0}" + "type": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", @@ -426,8 +376,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -488,7 +437,6 @@ "options": { "mode": "exclude", "names": [ - "mus-test2-1.mus-test2.root.hwx.site:9875" ], "prefix": "All except:", "readOnly": true @@ -529,12 +477,11 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "disableTextWrap": false, - "editorMode": "builder", - "expr": "om_performance_metrics_list_keys_ops_per_sec_num_ops", + "editorMode": "code", + "expr": "rate(om_performance_metrics_list_keys_latency_ns_num_ops[60s])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -549,8 +496,7 @@ }, { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "fieldConfig": { "defaults": { @@ -611,7 +557,6 @@ "options": { "mode": "exclude", "names": [ - "mus-test2-1.mus-test2.root.hwx.site:9875" ], "prefix": "All except:", "readOnly": true @@ -652,8 +597,7 @@ "targets": [ { "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS-1}" + "type": "prometheus" }, "disableTextWrap": false, "editorMode": "builder", @@ -685,7 +629,6 @@ "timepicker": {}, "timezone": "", "title": "ListKey Dashboard", - "uid": "cac0d75b-49a2-41f2-b8bf-57f9c86bfa8c", "version": 14, "weekStart": "" } \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json index 7644b12f2a25..dced4f391b17 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - Object Metrics.json @@ -1339,6 +1339,5 @@ }, "timezone": "", "title": "Ozone - Object Metrics", - "uid": "yakEh0Eik", "version": 1 } From 3d8365ffd2c7d36a65a10ead8e7306195164c931 Mon Sep 17 00:00:00 2001 From: Sumit Agrawal Date: Thu, 8 Feb 2024 11:57:07 +0530 Subject: [PATCH 002/108] HDDS-10296. Orphan blocks during overwrite of key. 
(#6180) --- .../ozone/om/TestSnapshotDeletingService.java | 5 +- .../om/request/key/OMKeyCommitRequest.java | 8 ++- .../key/OMKeyCommitRequestWithFSO.java | 8 ++- .../request/key/TestOMKeyCommitRequest.java | 60 +++++++++++-------- 4 files changed, 49 insertions(+), 32 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 6e3e4fd7f404..e627a880fd21 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -484,8 +484,9 @@ private void createSnapshotDataForBucket1() throws Exception { client.getProxy().deleteKey(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1key0", false); assertTableRowCount(keyTable, 0); - // bucket1key0 should also be reclaimed as it not same - assertTableRowCount(deletedTable, 1); + // one copy of bucket1key0 should also be reclaimed as it not same + // but original deleted key created during overwrite should not be deleted + assertTableRowCount(deletedTable, 2); // Create Snapshot 2. client.getProxy().createSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index e09c3bcef669..b28b390efd73 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -270,8 +270,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn keyToDelete, trxnLogIndex, ozoneManager.isRatisEnabled()); checkBucketQuotaInBytes(omMetadataManager, omBucketInfo, correctedSpace); + // using pseudoObjId as objectId can be same in case of overwrite key + long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); String delKeyName = omMetadataManager.getOzoneDeletePathKey( - keyToDelete.getObjectID(), dbOzoneKey); + pseudoObjId, dbOzoneKey); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -303,8 +305,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, - new RepeatedOmKeyInfo(pseudoKeyInfo)); + oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, + key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); } // Add to cache of open key table and key table. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index f062e71106e0..704e9e91c47d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -203,8 +203,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); String delKeyName = omMetadataManager .getOzoneKey(volumeName, bucketName, fileName); + // using pseudoObjId as objectId can be same in case of overwrite key + long pseudoObjId = ozoneManager.getObjectIdFromTxId(trxnLogIndex); delKeyName = omMetadataManager.getOzoneDeletePathKey( - keyToDelete.getObjectID(), delKeyName); + pseudoObjId, delKeyName); if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } @@ -238,8 +240,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn if (null == oldKeyVersionsToDeleteMap) { oldKeyVersionsToDeleteMap = new HashMap<>(); } - oldKeyVersionsToDeleteMap.put(delKeyName, - new RepeatedOmKeyInfo(pseudoKeyInfo)); + oldKeyVersionsToDeleteMap.computeIfAbsent(delKeyName, + key -> new RepeatedOmKeyInfo()).addOmKeyInfo(pseudoKeyInfo); } // Add to cache of open key table and key table. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index 3251fff97490..cffbe5ea3023 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -27,6 +27,9 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -56,10 +59,13 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; /** * Class tests OMKeyCommitRequest class. 
@@ -555,16 +561,17 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { @Test public void testValidateAndUpdateCacheOnOverwrite() throws Exception { + when(ozoneManager.getObjectIdFromTxId(anyLong())).thenAnswer(tx -> + OmUtils.getObjectIdFromTxId(2, tx.getArgument(0))); testValidateAndUpdateCache(); // Become a new client and set next version number clientID = Time.now(); version += 1; - OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest()); + OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest(getKeyLocation(10).subList(4, 10), false)); - OMKeyCommitRequest omKeyCommitRequest = - getOmKeyCommitRequest(modifiedOmRequest); + OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); KeyArgs keyArgs = modifiedOmRequest.getCommitKeyRequest().getKeyArgs(); @@ -576,49 +583,54 @@ public void testValidateAndUpdateCacheOnOverwrite() throws Exception { assertNotNull(omKeyInfo); // Previously committed version - assertEquals(0L, - omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(0L, omKeyInfo.getLatestVersionLocations().getVersion()); // Append new blocks List allocatedLocationList = - keyArgs.getKeyLocationsList().stream() - .map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + keyArgs.getKeyLocationsList().stream() + .map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); addKeyToOpenKeyTable(allocatedLocationList); OMClientResponse omClientResponse = omKeyCommitRequest.validateAndUpdateCache(ozoneManager, 102L); - assertEquals(OzoneManagerProtocolProtos.Status.OK, - omClientResponse.getOMResponse().getStatus()); + assertEquals(OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); // New entry should be created in key Table. - omKeyInfo = - omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()) - .get(ozoneKey); + omKeyInfo = omMetadataManager.getKeyTable(omKeyCommitRequest.getBucketLayout()).get(ozoneKey); assertNotNull(omKeyInfo); - assertEquals(version, - omKeyInfo.getLatestVersionLocations().getVersion()); + assertEquals(version, omKeyInfo.getLatestVersionLocations().getVersion()); // DB keyInfo format verifyKeyName(omKeyInfo); // Check modification time CommitKeyRequest commitKeyRequest = modifiedOmRequest.getCommitKeyRequest(); - assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), - omKeyInfo.getModificationTime()); + assertEquals(commitKeyRequest.getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); // Check block location. 
List locationInfoListFromCommitKeyRequest = - commitKeyRequest.getKeyArgs() - .getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) - .collect(Collectors.toList()); + commitKeyRequest.getKeyArgs().getKeyLocationsList().stream().map(OmKeyLocationInfo::getFromProtobuf) + .collect(Collectors.toList()); - assertEquals(locationInfoListFromCommitKeyRequest, - omKeyInfo.getLatestVersionLocations().getLocationList()); - assertEquals(allocatedLocationList, - omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(locationInfoListFromCommitKeyRequest, omKeyInfo.getLatestVersionLocations().getLocationList()); + assertEquals(allocatedLocationList, omKeyInfo.getLatestVersionLocations().getLocationList()); assertEquals(1, omKeyInfo.getKeyLocationVersions().size()); + + // flush response content to db + BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation(); + ((OMKeyCommitResponse) omClientResponse).addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // verify deleted key is unique generated + String deletedKey = omMetadataManager.getOzoneKey(volumeName, omKeyInfo.getBucketName(), keyName); + List> rangeKVs + = omMetadataManager.getDeletedTable().getRangeKVs(null, 100, deletedKey); + assertThat(rangeKVs.size()).isGreaterThan(0); + assertEquals(1, rangeKVs.get(0).getValue().getOmKeyInfoList().size()); + assertFalse(rangeKVs.get(0).getKey().endsWith(rangeKVs.get(0).getValue().getOmKeyInfoList().get(0).getObjectID() + + "")); } /** From 06399b5dcbea7d0bb7967b5d6da5e00196667a38 Mon Sep 17 00:00:00 2001 From: Galsza <109229906+Galsza@users.noreply.github.com> Date: Thu, 8 Feb 2024 10:28:53 +0100 Subject: [PATCH 003/108] HDDS-10226. Refactor OMRequestTestUtils.createOmKeyInfo (#6184) --- .../apache/hadoop/ozone/debug/TestLDBCli.java | 5 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 23 +- .../ozone/om/TestOmMetadataManager.java | 37 +-- .../ozone/om/request/OMRequestTestUtils.java | 240 +++++------------- .../bucket/TestOMBucketDeleteRequest.java | 13 +- .../file/TestOMDirectoryCreateRequest.java | 11 +- .../TestOMDirectoryCreateRequestWithFSO.java | 19 +- .../request/file/TestOMFileCreateRequest.java | 48 ++-- .../file/TestOMFileCreateRequestWithFSO.java | 35 ++- .../file/TestOMRecoverLeaseRequest.java | 12 +- .../key/TestOMAllocateBlockRequest.java | 8 +- .../TestOMAllocateBlockRequestWithFSO.java | 14 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 2 +- .../om/request/key/TestOMKeyAclRequest.java | 6 +- .../key/TestOMKeyAclRequestWithFSO.java | 23 +- .../request/key/TestOMKeyCommitRequest.java | 6 +- .../key/TestOMKeyCommitRequestWithFSO.java | 17 +- .../request/key/TestOMKeyCreateRequest.java | 6 +- .../key/TestOMKeyCreateRequestWithFSO.java | 17 +- .../request/key/TestOMKeyDeleteRequest.java | 4 +- .../key/TestOMKeyDeleteRequestWithFSO.java | 24 +- .../key/TestOMKeyPurgeRequestAndResponse.java | 2 +- .../request/key/TestOMKeyRenameRequest.java | 2 +- .../key/TestOMKeyRenameRequestWithFSO.java | 13 +- .../om/request/key/TestOMKeyRequest.java | 8 +- .../request/key/TestOMKeysDeleteRequest.java | 6 +- .../key/TestOMKeysDeleteRequestWithFSO.java | 16 +- .../request/key/TestOMKeysRenameRequest.java | 7 +- .../om/request/key/TestOMSetTimesRequest.java | 2 +- .../key/TestOMSetTimesRequestWithFSO.java | 15 +- ...S3ExpiredMultipartUploadsAbortRequest.java | 19 +- ...estS3MultipartUploadCommitPartRequest.java | 7 +- ...ltipartUploadCommitPartRequestWithFSO.java | 17 +- 
.../TestS3MultipartUploadCompleteRequest.java | 6 +- ...MultipartUploadCompleteRequestWithFSO.java | 17 +- .../snapshot/TestOMSnapshotCreateRequest.java | 12 +- .../file/TestOMDirectoryCreateResponse.java | 4 +- .../file/TestOMFileCreateResponseWithFSO.java | 11 +- .../key/TestOMAllocateBlockResponse.java | 2 +- .../TestOMAllocateBlockResponseWithFSO.java | 16 +- .../response/key/TestOMKeyCommitResponse.java | 4 +- .../key/TestOMKeyCommitResponseWithFSO.java | 26 +- .../key/TestOMKeyCreateResponseWithFSO.java | 15 +- .../response/key/TestOMKeyDeleteResponse.java | 6 +- .../key/TestOMKeyDeleteResponseWithFSO.java | 26 +- .../response/key/TestOMKeyRenameResponse.java | 6 +- .../key/TestOMKeyRenameResponseWithFSO.java | 34 +-- .../om/response/key/TestOMKeyResponse.java | 21 +- .../key/TestOMKeysDeleteResponse.java | 4 +- .../key/TestOMKeysDeleteResponseWithFSO.java | 13 +- .../key/TestOMKeysRenameResponse.java | 5 +- .../key/TestOMOpenKeysDeleteResponse.java | 2 +- ...3ExpiredMultipartUploadsAbortResponse.java | 14 +- ...ultipartUploadCompleteResponseWithFSO.java | 56 ++-- .../service/TestDirectoryDeletingService.java | 15 +- .../ozone/om/service/TestQuotaRepairTask.java | 21 +- 56 files changed, 483 insertions(+), 537 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index ec7eb81db33d..e94f46a398b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -22,6 +22,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -61,6 +62,7 @@ import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.STAND_ALONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -295,8 +297,7 @@ private void prepareTable(String tableName, boolean schemaV3) for (int i = 1; i <= 5; i++) { String key = "key" + i; OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("vol1", "buck1", - key, HddsProtos.ReplicationType.STAND_ALONE, - HddsProtos.ReplicationFactor.ONE); + key, ReplicationConfig.fromProtoTypeAndFactor(STAND_ALONE, HddsProtos.ReplicationFactor.ONE)).build(); keyTable.put(key.getBytes(UTF_8), value.getProtobuf(ClientVersion.CURRENT_VERSION).toByteArray()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 67ab3169b69c..ba98a28280a9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -44,9 +44,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -976,12 +974,11 @@ public void testListStatusWithTableCache() throws Exception { if (i % 2 == 0) { // Add to DB OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKeyInDB + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); } } @@ -1048,13 +1045,12 @@ public void testListStatusWithTableCacheRecursive() throws Exception { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); } else { // Add to TableCache OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); } } @@ -1092,13 +1088,12 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, - 1000L, HddsProtos.ReplicationType.RATIS, - ONE, metadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), metadataManager); existKeySet.add(prefixKey + i); } else { OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKey + i, - HddsProtos.ReplicationType.RATIS, ONE, + RatisReplicationConfig.getInstance(ONE), metadataManager); String key = metadataManager.getOzoneKey( @@ -1446,8 +1441,7 @@ public void testRefreshPipeline() throws Exception { when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); // Add block to key. List omKeyLocationInfoList = new ArrayList<>(); @@ -1501,8 +1495,7 @@ public void testRefreshPipelineException() throws Exception { OMPerformanceMetrics metrics = mock(OMPerformanceMetrics.class); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + "b1", "k1", RatisReplicationConfig.getInstance(THREE)).build(); // Add block to key. 
List omKeyLocationInfoList = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index e6debcdc23be..15af3910e90f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -26,6 +27,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.TransactionInfo; @@ -49,6 +51,7 @@ import java.io.File; import java.time.Duration; import java.time.Instant; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; @@ -61,6 +64,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD; @@ -619,9 +623,10 @@ private void testGetExpiredOpenKeys(BucketLayout bucketLayout) for (int i = 0; i < numExpiredOpenKeys + numUnexpiredOpenKeys; i++) { final long creationTime = i < numExpiredOpenKeys ? expiredOpenKeyCreationTime : Time.now(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, creationTime); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(creationTime) + .build(); final String dbOpenKeyName; if (bucketLayout.isFileSystemOptimized()) { @@ -689,10 +694,10 @@ private void testGetExpiredOpenKeysExcludeMPUKeys( // Ensure that "expired" MPU-related open keys are not fetched. // MPU-related open keys, identified by isMultipartKey = false for (int i = 0; i < numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, expiredOpenKeyCreationTime, true); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setCreationTime(expiredOpenKeyCreationTime) + .build(); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. @@ -722,10 +727,10 @@ private void testGetExpiredOpenKeysExcludeMPUKeys( // HDDS-9017. 
Although these open keys are MPU-related, // the isMultipartKey flags are set to false for (int i = numExpiredMPUOpenKeys; i < 2 * numExpiredMPUOpenKeys; i++) { - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, "expired" + i, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, expiredOpenKeyCreationTime, false); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo( + volumeName, bucketName, "expired" + i, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(expiredOpenKeyCreationTime) + .build(); final String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. @@ -788,8 +793,9 @@ private void testGetExpiredMPUs() throws Exception { String keyName = "expired" + i; // Key info to construct the MPU DB key final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, 0L, creationTime); + bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setCreationTime(creationTime) + .build(); for (int j = 1; j <= numPartsPerMPU; j++) { @@ -861,11 +867,10 @@ private void addKeysToOM(String volumeName, String bucketName, if (i % 2 == 0) { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - 1000L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + 1000L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); } else { OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + RatisReplicationConfig.getInstance(ONE), omMetadataManager); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index bdc6509247b1..21b94ce5f05a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -157,23 +157,22 @@ public static void addVolumeAndBucketToDB( @SuppressWarnings("parameterNumber") public static void addKeyToTableAndCache(String volumeName, String bucketName, - String keyName, long clientID, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + String keyName, long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(false, true, volumeName, bucketName, keyName, clientID, - replicationType, replicationFactor, trxnLogIndex, omMetadataManager); + replicationConfig, trxnLogIndex, omMetadataManager); } /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. 
+ * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @param locationList * @throws Exception @@ -181,12 +180,11 @@ public static void addKeyToTableAndCache(String volumeName, String bucketName, @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 0L, omMetadataManager, + clientID, replicationConfig, 0L, omMetadataManager, locationList, version); } @@ -194,24 +192,23 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. + * * @param openKeyTable * @param volumeName * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, 0L, omMetadataManager); + clientID, replicationConfig, 0L, omMetadataManager); } /** @@ -225,20 +222,17 @@ public static void addKeyToTable(boolean openKeyTable, String volumeName, * @param bucketName * @param keyName * @param clientID - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager * @throws Exception */ @SuppressWarnings("parameterNumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) throws Exception { addKeyToTable(openKeyTable, isMultipartKey, false, - volumeName, bucketName, keyName, clientID, replicationType, - replicationFactor, 0L, omMetadataManager); + volumeName, bucketName, keyName, clientID, replicationConfig, 0L, omMetadataManager); } /** @@ -248,19 +242,20 @@ public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, */ @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, - String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + String volumeName, String bucketName, String keyName, long clientID, ReplicationConfig replicationConfig, + long trxnLogIndex, OMMetadataManager omMetadataManager, List locationList, long version) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, 
bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex, Time.now(), version, - false); + replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setObjectID(trxnLogIndex) + .build(); + omKeyInfo.appendNewBlocks(locationList, false); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, - omMetadataManager); + omMetadataManager); } /** @@ -271,12 +266,11 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean addToCache, String volumeName, String bucketName, String keyName, long clientID, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { - OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex); + OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig) + .setObjectID(trxnLogIndex).build(); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -290,13 +284,13 @@ public static void addKeyToTable(boolean openKeyTable, boolean addToCache, @SuppressWarnings("parameternumber") public static void addKeyToTable(boolean openKeyTable, boolean isMultipartKey, boolean addToCache, String volumeName, String bucketName, String keyName, - long clientID, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long trxnLogIndex, + long clientID, ReplicationConfig replicationConfig, long trxnLogIndex, OMMetadataManager omMetadataManager) throws Exception { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, trxnLogIndex, Time.now(), 0L, - isMultipartKey); + replicationConfig, new OmKeyLocationInfoGroup(0, new ArrayList<>(), isMultipartKey)) + .setObjectID(trxnLogIndex) + .build(); addKeyToTable(openKeyTable, addToCache, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -431,23 +425,22 @@ public static void addPart(PartKeyInfo partKeyInfo, /** * Add key entry to key table cache. + * * @param volumeName * @param bucketName * @param keyName - * @param replicationType - * @param replicationFactor + * @param replicationConfig * @param omMetadataManager */ @SuppressWarnings("parameterNumber") public static void addKeyToTableCache(String volumeName, String bucketName, String keyName, - HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, + ReplicationConfig replicationConfig, OMMetadataManager omMetadataManager) { OmKeyInfo omKeyInfo = createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); + replicationConfig).build(); omMetadataManager.getKeyTable(getDefaultBucketLayout()).addCacheEntry( new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, @@ -543,87 +536,42 @@ public static void addSnapshotToTable( /** * Create OmKeyInfo. + * Initializes most values to a sensible default. 
*/ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, 0L); + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, + String keyName, ReplicationConfig replicationConfig, OmKeyLocationInfoGroup omKeyLocationInfoGroup) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFileName(OzoneFSUtils.getFileName(keyName)) + .setReplicationConfig(replicationConfig) + .setObjectID(0L) + .setUpdateID(0L) + .setCreationTime(Time.now()) + .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup) + .setDataSize(1000L); + } + + public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucketName, + String keyName, ReplicationConfig replicationConfig) { + return createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig, + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), false)); } /** * Create OmDirectoryInfo. */ public static OmDirectoryInfo createOmDirectoryInfo(String keyName, - long objectID, - long parentObjID) { + long objectID, + long parentObjID) { return new OmDirectoryInfo.Builder() - .setName(keyName) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setObjectID(objectID) - .setParentObjectID(parentObjID) - .setUpdateID(50) - .build(); - } - - /** - * Create OmKeyInfo. - */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, Time.now()); - } - - /** - * Create OmKeyInfo. - */ - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, creationTime, 0L, false); - } - - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime, boolean isMultipartKey) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, creationTime, 0L, isMultipartKey); - } - - /** - * Create OmKeyInfo for LEGACY/OBS bucket. 
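// A minimal usage sketch of the reworked helper introduced above, assuming the Ratis/ONE
// replication config these tests use elsewhere in this patch: createOmKeyInfo now returns an
// OmKeyInfo.Builder, so callers set only the ids they care about and call build() themselves.
// The literal volume/bucket/key names and id values below are hypothetical placeholders; the
// helper, builder setters, and imports are the ones added by this change.
long objectId = 100L;   // hypothetical object id
long parentId = 99L;    // hypothetical parent id
OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo("vol", "bucket", "dir/key",
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
    .setObjectID(objectId)
    .setParentObjectID(parentId)
    .build();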
- */ - @SuppressWarnings("parameterNumber") - private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long creationTime, long version, boolean isMultipartKey) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setFileName(OzoneFSUtils.getFileName(keyName)) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(version, new ArrayList<>(), - isMultipartKey))) - .setCreationTime(creationTime) + .setName(keyName) + .setCreationTime(Time.now()) .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationConfig( - ReplicationConfig - .fromProtoTypeAndFactor(replicationType, replicationFactor)) .setObjectID(objectID) - .setUpdateID(objectID) + .setParentObjectID(parentObjID) + .setUpdateID(50) .build(); } @@ -631,8 +579,8 @@ private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, * Create OmMultipartKeyInfo for OBS/LEGACY bucket. */ public static OmMultipartKeyInfo createOmMultipartKeyInfo(String uploadId, - long creationTime, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID) { + long creationTime, HddsProtos.ReplicationType replicationType, + HddsProtos.ReplicationFactor replicationFactor, long objectID) { return new OmMultipartKeyInfo.Builder() .setUploadID(uploadId) .setCreationTime(creationTime) @@ -1408,76 +1356,6 @@ public static void addVolumeToOM(OMMetadataManager omMetadataManager, CacheValue.get(1L, omVolumeArgs)); } - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime) { - return createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, objectID, - parentID, trxnLogIndex, creationTime, 0L, false); - } - - /** - * Create OmKeyInfo with isMultipartKey flag. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, - boolean isMultipartKey) { - return createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, objectID, - parentID, trxnLogIndex, creationTime, 0L, isMultipartKey); - } - - /** - * Create OmKeyInfo. - */ - @SuppressWarnings("parameterNumber") - public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, long version) { - return createOmKeyInfo(volumeName, bucketName, keyName, replicationType, - replicationFactor, objectID, parentID, trxnLogIndex, creationTime, - version, false); - } - - /** - * Create OmKeyInfo for FSO bucket. 
- */ - @SuppressWarnings("parameterNumber") - private static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName, - String keyName, HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor, long objectID, - long parentID, long trxnLogIndex, long creationTime, long version, - boolean isMultipartKey) { - String fileName = OzoneFSUtils.getFileName(keyName); - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(version, new ArrayList<>(), - isMultipartKey))) - .setCreationTime(creationTime) - .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationConfig(ReplicationConfig - .fromProtoTypeAndFactor(replicationType, replicationFactor)) - .setObjectID(objectID) - .setUpdateID(trxnLogIndex) - .setParentObjectID(parentID) - .setFileName(fileName) - .build(); - } - - /** * Add key entry to KeyTable. if openKeyTable flag is true, add's entries * to openKeyTable, else add's it to keyTable. diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index 34f348a688dc..fdc13e369c08 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java @@ -19,16 +19,21 @@ package org.apache.hadoop.ozone.om.request.bucket; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; + +import java.util.ArrayList; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; @@ -119,12 +124,10 @@ public void testBucketContainsIncompleteMPUs() throws Exception { new OMBucketDeleteRequest(omRequest); // Create a MPU key in the MPU table to simulate incomplete MPU - long creationTime = Time.now(); String uploadId = OMMultipartUploadUtils.getMultipartUploadId(); - final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, UUID.randomUUID().toString(), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, creationTime, true); + final OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, UUID.randomUUID().toString(), + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .build(); final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils. 
createOmMultipartKeyInfo(uploadId, Time.now(), HddsProtos.ReplicationType.RATIS, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 275e8a6f2aae..7af60c18d94a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -27,7 +27,7 @@ import java.util.stream.Collectors; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -60,6 +60,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -297,8 +298,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName.substring(0, 12), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = @@ -340,7 +340,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, + RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); @@ -383,8 +383,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { omMetadataManager); // Add a key with first two levels. 
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName.substring(0, 11), 1L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); OMDirectoryCreateRequest omDirectoryCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 0eceb2246ee2..e0460ba81a99 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.om.request.file; import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; @@ -59,6 +59,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -422,8 +423,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { // Add a file into the FileTable, this is to simulate "file exists" check. OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, objID++); + bucketName, keyName, RatisReplicationConfig.getInstance(THREE)).setObjectID(objID++).build(); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); @@ -492,21 +492,22 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath() // for index=0, parentID is bucketID OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( - dirs.get(0), objID++, parentID); + dirs.get(0), objID++, parentID); OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, - volumeName, bucketName, txnID, omMetadataManager); + volumeName, bucketName, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); // Add a key in second level. 
- OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, objID); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(THREE)) + .setObjectID(objID) + .build(); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omBucketInfo.getObjectID(); final String ozoneKey = omMetadataManager.getOzonePathKey( - volumeId, bucketId, parentID, dirs.get(1)); + volumeId, bucketId, parentID, dirs.get(1)); ++txnID; omMetadataManager.getKeyTable(getBucketLayout()) .addCacheEntry(new CacheKey<>(ozoneKey), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index b39068fd7341..74b067a76a45 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -24,6 +24,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -190,7 +191,7 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() .setBucketName(bucketName) .setBucketLayout(getBucketLayout()) .setQuotaInNamespace(1)); - + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); @@ -243,19 +244,17 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); testNonRecursivePath("a/b", false, false, true); + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/d", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/", 0L, replicationConfig, omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -275,14 +274,14 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception { // Should be able to create file even if parent directories does not // exist and key already exist, as this is with overwrite enabled. 
testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/e/f", 0L, replicationConfig, omMetadataManager); testNonRecursivePath("c/d/e/f", true, true, false); // Create some child keys for the path OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/d", 0L, replicationConfig, omMetadataManager); testNonRecursivePath("a/b/c", false, true, false); } @@ -293,16 +292,17 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() String key = "c/d/e/f"; // Should be able to create file even if parent directories does not exist testNonRecursivePath(key, false, true, false); - + // 3 parent directory created c/d/e assertEquals(omMetadataManager.getBucketTable().get( omMetadataManager.getBucketKey(volumeName, bucketName)) .getUsedNamespace(), 3); - + // Add the key to key table + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + key, 0L, replicationConfig, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true @@ -315,23 +315,21 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() throws Exception { String key = "c/d/e/f"; + ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); // Need to add the path which starts with "c/d/e" to keyTable as this is // non-recursive parent should exist. 
OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/", 0L, replicationConfig, omMetadataManager); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "c/d/e/", 0L, replicationConfig, omMetadataManager); testNonRecursivePath(key, false, false, false); // Add the key to key table OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - key, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + key, 0L, replicationConfig, omMetadataManager); // Even if key exists, should be able to create file as overwrite is set // to true diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index 1b7b7452c82c..e988949c5b85 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -28,11 +29,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -55,8 +56,7 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { "a/b/c", omMetadataManager); String fileNameD = "d"; OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + "a/b/c/" + fileNameD, 0L, RatisReplicationConfig.getInstance(ONE), omMetadataManager); // cannot create file if directory of same name exists testNonRecursivePath("a/b/c", false, false, true); @@ -80,7 +80,7 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, - "/test/a1/a2", HddsProtos.ReplicationFactor.ONE, + "/test/a1/a2", ONE, HddsProtos.ReplicationType.RATIS, false, true); // create bucket with quota limit 1 @@ -114,11 +114,11 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() // Add the key to key table OmDirectoryInfo omDirInfo = 
getDirInfo("c/d/e"); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - omDirInfo.getObjectID() + 10, - omDirInfo.getObjectID(), 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(omDirInfo.getObjectID() + 10) + .setParentObjectID(omDirInfo.getObjectID()) + .setUpdateID(100) + .build(); OMRequestTestUtils.addFileToKeyTable(false, false, "f", omKeyInfo, -1, omDirInfo.getObjectID() + 10, omMetadataManager); @@ -136,23 +136,22 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() String fileName = "f"; String key = parentDir + "/" + fileName; OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager, getBucketLayout()); + omMetadataManager, getBucketLayout()); // Create parent dirs for the path long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, - bucketName, parentDir, omMetadataManager); + bucketName, parentDir, omMetadataManager); // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is // non-recursive parent should exist. testNonRecursivePath(key, false, false, false); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); - OMRequestTestUtils.addFileToKeyTable(false, false, - fileName, omKeyInfo, -1, 50, omMetadataManager); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); // Even if key exists in KeyTable, should be able to create file as // overwrite is set to true diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java index 3a1ab92c1b5a..294281555a56 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMRecoverLeaseRequest.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; @@ -35,7 +36,6 @@ .RecoverLeaseRequest; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; @@ -272,8 +272,9 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { String addToOpenFileTable(List locationList) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor, 0, parentId, - 0, Time.now(), version); + bucketName, keyName, 
replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setParentObjectID(parentId) + .build(); omKeyInfo.appendNewBlocks(locationList, false); omKeyInfo.getMetadata().put(OzoneConsts.HSYNC_CLIENT_ID, String.valueOf(clientID)); @@ -294,8 +295,9 @@ String addToOpenFileTable(List locationList) String addToFileTable(List locationList) throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor, 0, parentId, - 0, Time.now(), version); + bucketName, keyName, replicationConfig, new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setParentObjectID(parentId) + .build(); omKeyInfo.appendNewBlocks(locationList, false); OMRequestTestUtils.addFileToKeyTable( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index eb99cd932568..9fb0e79953e1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -236,7 +237,8 @@ protected OMRequest createAllocateBlockRequest() { KeyArgs keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName) - .setFactor(replicationFactor).setType(replicationType) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .build(); AllocateBlockRequest allocateBlockRequest = @@ -253,8 +255,8 @@ protected OMRequest createAllocateBlockRequest() { protected String addKeyToOpenKeyTable(String volumeName, String bucketName) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, - omMetadataManager); + keyName, clientID, replicationConfig, + omMetadataManager); return ""; } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java index 33512d355c0d..1ecbfed71624 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java @@ -20,10 +20,12 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ 
-31,7 +33,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; /** @@ -65,10 +66,11 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName) long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable OMRequestTestUtils.addFileToKeyTable(true, false, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index f040bd508177..cbb782e184fe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -68,7 +68,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationType, replicationFactor, trxnIndex++, + key, clientID, replicationConfig, trxnIndex++, omMetadataManager); String ozoneKey = omMetadataManager.getOzoneKey( volumeName, bucket, key); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index c9559ff41e1f..b9aa70b4c7e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -20,8 +20,12 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; + import java.util.List; import java.util.UUID; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -247,7 +251,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java index 48d92e608b3e..ea9c3223de5a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -26,7 +26,8 @@ import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO; import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; /** * Test Key ACL requests for prefix layout. @@ -44,20 +45,22 @@ protected String addKeyToTable() throws Exception { .addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - parentId + 1, parentId, 100, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); final long volumeId = omMetadataManager.getVolumeId( - omKeyInfo.getVolumeName()); + omKeyInfo.getVolumeName()); final long bucketId = omMetadataManager.getBucketId( - omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName()); return omMetadataManager.getOzonePathKey( - volumeId, bucketId, omKeyInfo.getParentObjectID(), - fileName); + volumeId, bucketId, omKeyInfo.getParentObjectID(), + fileName); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index cffbe5ea3023..9719865db196 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -26,6 +26,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; @@ -698,7 +699,8 @@ private OMRequest createCommitKeyRequest( KeyArgs keyArgs = KeyArgs.newBuilder().setDataSize(dataSize).setVolumeName(volumeName) .setKeyName(keyName).setBucketName(bucketName) - .setType(replicationType).setFactor(replicationFactor) + .setType(replicationConfig.getReplicationType()) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) .addAllKeyLocations(keyLocations).build(); 
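// Sketch of the conversion used in createCommitKeyRequest above, assuming the test fixture's
// Ratis replication: the proto type is read directly from the ReplicationConfig, while the proto
// factor still comes from the Ratis-specific subtype, so the cast is only safe because the fixture
// sets replicationConfig to RatisReplicationConfig.getInstance(ONE). Both calls appear in this patch.
ReplicationConfig conf = ReplicationConfig.fromProtoTypeAndFactor(
    HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE);
HddsProtos.ReplicationType protoType = conf.getReplicationType();
HddsProtos.ReplicationFactor protoFactor =
    ((RatisReplicationConfig) conf).getReplicationFactor();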
CommitKeyRequest commitKeyRequest = @@ -743,7 +745,7 @@ protected String getOzonePathKey() throws IOException { protected String addKeyToOpenKeyTable(List locationList) throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager, + clientID, replicationConfig, omMetadataManager, locationList, version); return omMetadataManager.getOpenKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index d258c1cfde43..48cc52773a33 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -19,19 +19,22 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -78,10 +81,12 @@ protected String addKeyToOpenKeyTable(List locationList) long objectId = 100; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, - Time.now(), version); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(version, new ArrayList<>(), false)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(100L) + .build(); omKeyInfoFSO.appendNewBlocks(locationList, false); String fileName = OzoneFSUtils.getFileName(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 12d9d02a72d6..5d79e7771520 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -31,6 +31,7 @@ import java.util.HashMap; import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneAcl; @@ -546,7 +547,8 @@ private OMRequest createKeyRequest(boolean isMultipartKey, 
int partNumber, KeyArgs.Builder keyArgs = KeyArgs.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setKeyName(keyName).setIsMultipartKey(isMultipartKey) - .setFactor(replicationFactor).setType(replicationType) + .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setType(replicationConfig.getReplicationType()) .setLatestVersionLocation(true); if (isMultipartKey) { @@ -793,7 +795,7 @@ private void verifyKeyInheritAcls(List keyAcls, protected void addToKeyTable(String keyName) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, - keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); + keyName.substring(1), 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index 0750c9512618..2a25a9b09686 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -42,6 +41,7 @@ import java.util.Arrays; import java.util.Collection; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -107,12 +107,13 @@ protected void addToKeyTable(String keyName) throws Exception { Path keyPath = Paths.get(keyName); long parentId = checkIntermediatePaths(keyPath); String fileName = OzoneFSUtils.getFileName(keyName); - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fileName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, - Time.now()); - OMRequestTestUtils.addFileToKeyTable(false, false, - fileName, omKeyInfo, -1, 50, omMetadataManager); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index 
00d1883d749c..9f1bee28c047 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -192,8 +192,8 @@ protected String addKeyToTable() throws Exception { protected String addKeyToTable(String key) throws Exception { OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, key, clientID, replicationType, replicationFactor, - omMetadataManager); + bucketName, key, clientID, replicationConfig, + omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index 9dafab090295..07094ad2923f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.OzonePrefixPathImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -33,7 +34,6 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.security.acl.OzonePrefixPath; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -72,11 +72,11 @@ protected String addKeyToTable() throws Exception { bucketName, PARENT_DIR, omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_KEY, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(FILE_NAME); OMRequestTestUtils.addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); @@ -96,11 +96,11 @@ protected String addKeyToDirTable(String volumeName, String bucketName, bucketName, key, omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); 
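// Sketch of the slimmed-down addKeyToTable overload used above, assuming the shared test
// fixture's omMetadataManager field and Ratis/ONE replication: a single ReplicationConfig
// argument replaces the former ReplicationType/ReplicationFactor pair. The volume, bucket,
// and key literals are hypothetical placeholders.
OMRequestTestUtils.addKeyToTable(false, "vol", "bucket", "dir/key", 0L,
    RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), omMetadataManager);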
omKeyInfo.setKeyName(key); return omKeyInfo.getPath(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index a1d616c07563..ff3db1abbe20 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -76,7 +76,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, - key, clientID, replicationType, replicationFactor, trxnIndex++, + key, clientID, replicationConfig, trxnIndex++, omMetadataManager); ozoneKeyNames.add(omMetadataManager.getOzoneKey( volumeName, bucket, key)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index a6015870d09b..0a2dcfd5d67a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -240,7 +240,7 @@ protected OMRequest createRenameKeyRequest( protected OmKeyInfo getOmKeyInfo(String keyName) { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, 0L); + replicationConfig).build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java index c91b8e158214..40c5156b5dbe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequestWithFSO.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertThrows; + import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -37,7 +39,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -179,10 +180,10 @@ private OMRequest doPreExecute(OMRequest originalOmRequest) 
throws Exception { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - bucketId + 100L, bucketId + 101L, 0L, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(bucketId + 100L) + .setParentObjectID(bucketId + 101L) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 4fced8a7a8c9..47b090f88d43 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -25,6 +25,7 @@ import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -55,7 +56,6 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -110,8 +110,7 @@ public class TestOMKeyRequest { protected String volumeName; protected String bucketName; protected String keyName; - protected HddsProtos.ReplicationType replicationType; - protected HddsProtos.ReplicationFactor replicationFactor; + protected ReplicationConfig replicationConfig; protected long clientID; protected long scmBlockSize = 1000L; protected long dataSize; @@ -209,8 +208,7 @@ public void setup() throws Exception { volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; + replicationConfig = RatisReplicationConfig.getInstance(ReplicationFactor.ONE); clientID = Time.now(); dataSize = 1000L; random = new Random(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index d48131de4bd3..d0cfd48e35dc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -31,6 +31,7 @@ import java.util.List; import 
java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -145,8 +146,7 @@ protected void createPreRequisites() throws Exception { for (int i = 0; i < count; i++) { key = parentDir.concat("/key" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, omMetadataManager); + parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); deleteKeyArgs.addKeys(key); deleteKeyList.add(key); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java index f28ca2e2685f..2da80550275a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; /** @@ -83,11 +83,13 @@ protected void createPreRequisites() throws Exception { long parentId = OMRequestTestUtils .addParentsToDirTable(volumeName, bucketName, dir, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, dir + "/" + file, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, - Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, dir + "/" + file, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); omKeyInfo.setKeyName(file); OMRequestTestUtils .addFileToKeyTable(false, false, file, omKeyInfo, -1, 50, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java index 3d429f4d6847..340b6e36eb0b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.om.request.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static 
org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -127,8 +129,7 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, - parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, omMetadataManager); + parentDir.concat("/key" + i), RatisReplicationConfig.getInstance(THREE), omMetadataManager); RenameKeysMap.Builder renameKey = RenameKeysMap.newBuilder() .setFromKeyName(key) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java index bfae424cc954..ad834fa556bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequest.java @@ -100,7 +100,7 @@ private OMRequest createSetTimesKeyRequest(long mtime, long atime) { protected String addKeyToTable() throws Exception { OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, - keyName, clientID, replicationType, replicationFactor, 1L, + keyName, clientID, replicationConfig, 1L, omMetadataManager); return omMetadataManager.getOzoneKey(volumeName, bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java index 2cd9273c25a5..0960125b0575 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMSetTimesRequestWithFSO.java @@ -17,17 +17,17 @@ */ package org.apache.hadoop.ozone.om.request.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.junit.jupiter.api.Test; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -115,10 +115,13 @@ protected String addKeyToTable() throws Exception { 
.addParentsToDirTable(volumeName, bucketName, PARENT_DIR, omMetadataManager); - OmKeyInfo omKeyInfo = OMRequestTestUtils - .createOmKeyInfo(volumeName, bucketName, FILE_NAME, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - parentId + 1, parentId, 100, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, FILE_NAME, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1L) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils .addFileToKeyTable(false, false, FILE_NAME, omKeyInfo, -1, 50, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java index 25c908b18a2d..f02e1ee23679 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3ExpiredMultipartUploadsAbortRequest.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -27,14 +28,15 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -51,7 +53,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartUploadsExpiredAbortRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; @@ -480,10 +481,13 @@ private List createMPUsWithFSO(String volume, String bucket, commitMultipartRequest, BucketLayout.FILE_SYSTEM_OPTIMIZED); // Add key to open key table to be used in MPU commit processing - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentID + j, parentID, - trxnLogIndex, Time.now(), true); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + j) + 
.setParentObjectID(parentID) + .setUpdateID(trxnLogIndex) + .build(); + String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfo, clientID, trxnLogIndex, omMetadataManager); @@ -563,8 +567,7 @@ private List createMPUs(String volume, String bucket, // Add key to open key table to be used in MPU commit processing OMRequestTestUtils.addKeyToTable( true, true, - volume, bucket, keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + volume, bucket, keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager); OMClientResponse commitResponse = s3MultipartUploadCommitPartRequest.validateAndUpdateCache( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index 61c792a83de3..014b4e021cb3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -24,6 +24,8 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -224,9 +226,8 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, + keyName, clientID, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), omMetadataManager); } protected String getKeyName() { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index 4c8e4881d925..24480c249cc8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -24,15 +24,17 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.ArrayList; import java.util.UUID; /** @@ -68,13 +70,16 @@ protected String getKeyName() { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { long txnLogId = 0L; - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID, - txnLogId, Time.now(), true); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 1) + .setParentObjectID(parentID) + .setUpdateID(txnLogId) + .build(); String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, - fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); + fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 733c790bcf17..0a1ce8f7246f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -28,6 +29,7 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -37,7 +39,6 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.junit.jupiter.api.Test; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -315,8 +316,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() protected void addKeyToTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { OMRequestTestUtils.addKeyToTable(true, true, volumeName, bucketName, - keyName, clientID, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, omMetadataManager); + keyName, clientID, RatisReplicationConfig.getInstance(ONE), omMetadataManager); } protected String getMultipartKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index 5926b5fd1d9c..1762f38b44bd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -18,18 +18,21 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.util.Time; import java.io.IOException; +import java.util.ArrayList; import java.util.UUID; /** @@ -72,10 +75,12 @@ protected void addKeyToTable(String volumeName, String bucketName, long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable String fileName = OzoneFSUtils.getFileName(keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java index 45e5b1007531..a3e83986b531 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotCreateRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -52,6 +52,7 @@ import java.io.IOException; import java.util.UUID; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest; @@ -321,8 +322,9 @@ private void renameDir(String fromKey, String toKey, long offset) throws Exception { String 
fromKeyParentName = UUID.randomUUID().toString(); OmKeyInfo fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 100L); + bucketName, fromKeyParentName, RatisReplicationConfig.getInstance(THREE)) + .setObjectID(100L) + .build(); OmKeyInfo toKeyInfo = addKey(toKey, offset + 4L); OmKeyInfo fromKeyInfo = addKey(fromKey, offset + 5L); @@ -381,8 +383,8 @@ public static OMSnapshotCreateRequest doPreExecute( private OmKeyInfo addKey(String keyName, long objectId) { return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, - objectId); + RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) + .build(); } protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index 811e13ac173e..7d6487493861 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -20,6 +20,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.Table; @@ -82,7 +84,7 @@ public void testAddToDBBatch() throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); + RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).build(); ThreadLocalRandom random = ThreadLocalRandom.current(); long usedNamespace = Math.abs(random.nextLong(Long.MAX_VALUE)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java index c7e2c265b7bb..c639c77c08e3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -41,11 +40,11 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return 
OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index e5a6b0ab14f5..88ef2964d17e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -92,7 +92,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { protected OmKeyInfo createOmKeyInfo() throws Exception { return OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); + bucketName, keyName, replicationConfig).build(); } protected String getOpenKey() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java index 85e9354ca8c9..b574b8548132 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java @@ -18,18 +18,19 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; + /** * Tests OMAllocateBlockResponse - prefix layout. 
*/ @@ -49,12 +50,11 @@ protected OmKeyInfo createOmKeyInfo() throws Exception { long txnId = 50; long objectId = parentID + 1; - OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now()); - return omKeyInfoFSO; + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index bb95c43107c3..89b179391cee 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -81,7 +81,7 @@ public void testAddToDBBatch() throws Exception { public void testAddToDBBatchNoOp() throws Exception { OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, replicationType, replicationFactor); + bucketName, keyName, replicationConfig).build(); OzoneManagerProtocolProtos.OMResponse omResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse( @@ -135,7 +135,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception { @Nonnull protected void addKeyToOpenKeyTable() throws Exception { OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + clientID, replicationConfig, omMetadataManager); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java index a1173e554325..32d55d3e961c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java @@ -18,17 +18,19 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; + import java.util.HashMap; import java.util.Map; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -62,11 +64,11 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, 
- omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationConfig) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull @@ -77,11 +79,11 @@ protected void addKeyToOpenKeyTable() throws Exception { long objectId = parentID + 10; OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, - Time.now()); - + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(100L) + .build(); String fileName = OzoneFSUtils.getFileName(keyName); OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java index ee83f3671277..53d86e667367 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java @@ -18,13 +18,15 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import jakarta.annotation.Nonnull; import java.io.IOException; @@ -50,11 +52,12 @@ protected String getOpenKeyName() throws IOException { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(omBucketInfo); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - omBucketInfo.getBucketName(), keyName, replicationType, - replicationFactor, - omBucketInfo.getObjectID() + 1, - omBucketInfo.getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(omBucketInfo.getObjectID() + 1) + .setParentObjectID(omBucketInfo.getObjectID()) + .setUpdateID(100L) + .build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index 4690b6f56f72..6440edd0327c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -22,7 +22,6 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import 
org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.utils.db.Table; @@ -89,8 +88,7 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { Pipeline pipeline = Pipeline.newBuilder() .setState(Pipeline.PipelineState.OPEN) .setId(PipelineID.randomId()) - .setReplicationConfig(RatisReplicationConfig - .getInstance(replicationFactor)) + .setReplicationConfig(replicationConfig) .setNodes(new ArrayList<>()) .build(); @@ -167,7 +165,7 @@ protected String addKeyToTable() throws Exception { keyName); OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, - clientID, replicationType, replicationFactor, omMetadataManager); + clientID, replicationConfig, omMetadataManager); return ozoneKey; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java index fda72eb91243..557839f44f7a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.response.key; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertNotNull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.util.Time; /** * Tests OMKeyDeleteResponse - prefix layout. 
@@ -50,11 +51,11 @@ protected String addKeyToTable() throws Exception { bucketName, "", omMetadataManager); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1, - parentId, 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1) + .setParentObjectID(parentId) + .setUpdateID(100L) + .build(); OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); return omMetadataManager.getOzonePathKey( @@ -66,11 +67,12 @@ protected String addKeyToTable() throws Exception { @Override protected OmKeyInfo getOmKeyInfo() { assertNotNull(getOmBucketInfo()); - return OMRequestTestUtils.createOmKeyInfo(volumeName, - getOmBucketInfo().getBucketName(), keyName, replicationType, - replicationFactor, - getOmBucketInfo().getObjectID() + 1, - getOmBucketInfo().getObjectID(), 100, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, + replicationConfig) + .setObjectID(getOmBucketInfo().getObjectID() + 1) + .setParentObjectID(getOmBucketInfo().getObjectID()) + .setUpdateID(100L) + .build(); } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java index 2dcef56330f2..07c094cc98a1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java @@ -154,12 +154,10 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } protected OmKeyInfo getOmKeyInfo(String keyName) { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor, 0L); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); } - protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, - String keyName) { + protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { return getOmKeyInfo(keyName); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java index f2f9ccaf872e..edbb50d66f86 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponseWithFSO.java @@ -18,17 +18,17 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.TestOMResponseUtils; -import org.apache.hadoop.util.Time; import java.io.IOException; import java.util.UUID; +import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; /** @@ -38,19 +38,21 @@ public class TestOMKeyRenameResponseWithFSO extends TestOMKeyRenameResponse { @Override protected OmKeyInfo getOmKeyInfo(String keyName) { long bucketId = random.nextLong(); - return OMRequestTestUtils.createOmKeyInfo( - volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - bucketId + 100L, bucketId + 101L, 0L, Time.now()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(bucketId + 100) + .setParentObjectID(bucketId + 101) + .build(); } @Override protected OmKeyInfo getOmKeyInfo(OmKeyInfo toKeyInfo, String keyName) { - return OMRequestTestUtils.createOmKeyInfo(toKeyInfo.getVolumeName(), - toKeyInfo.getBucketName(), keyName, replicationType, - replicationFactor, toKeyInfo.getObjectID(), - toKeyInfo.getParentObjectID(), 0L, toKeyInfo.getCreationTime()); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(toKeyInfo.getObjectID()) + .setParentObjectID(toKeyInfo.getParentObjectID()) + .setUpdateID(0L) + .setCreationTime(toKeyInfo.getCreationTime()) + .build(); } @Override @@ -80,12 +82,12 @@ protected void createParent() { long bucketId = random.nextLong(); String fromKeyParentName = UUID.randomUUID().toString(); String toKeyParentName = UUID.randomUUID().toString(); - fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, fromKeyParentName, replicationType, replicationFactor, - bucketId + 100L); - toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, toKeyParentName, replicationType, replicationFactor, - bucketId + 101L); + fromKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fromKeyParentName, replicationConfig) + .setObjectID(bucketId + 100L) + .build(); + toKeyParent = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, toKeyParentName, replicationConfig) + .setObjectID(bucketId + 101L) + .build(); fromKeyParent.setParentObjectID(bucketId); toKeyParent.setParentObjectID(bucketId); fromKeyParent.setFileName(OzoneFSUtils.getFileName( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index 1cbf5c6d0b2d..bc4c34bd0db3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -24,6 +24,7 @@ import java.util.Random; import java.util.UUID; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -59,8 +60,7 @@ public class TestOMKeyResponse { protected String volumeName; protected String bucketName; protected String keyName; - protected HddsProtos.ReplicationFactor replicationFactor; - protected HddsProtos.ReplicationType replicationType; + protected ReplicationConfig replicationConfig; protected OmBucketInfo omBucketInfo; protected long clientID; protected Random random; @@ -78,18 +78,18 @@ public void setup() throws Exception 
{ volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); keyName = UUID.randomUUID().toString(); - replicationFactor = HddsProtos.ReplicationFactor.ONE; - replicationType = HddsProtos.ReplicationType.RATIS; + replicationConfig = ReplicationConfig.fromProtoTypeAndFactor( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); clientID = 1000L; random = new Random(); keysToDelete = null; final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() - .setVolume(volumeName) - .setAdminName("admin") - .setOwnerName("owner") - .setObjectID(System.currentTimeMillis()) - .build(); + .setVolume(volumeName) + .setAdminName("admin") + .setOwnerName("owner") + .setObjectID(System.currentTimeMillis()) + .build(); omMetadataManager.getVolumeTable().addCacheEntry( new CacheKey<>(omMetadataManager.getVolumeKey(volumeName)), @@ -117,8 +117,7 @@ protected String getOpenKeyName() throws IOException { @Nonnull protected OmKeyInfo getOmKeyInfo() { - return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - replicationType, replicationFactor); + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationConfig).build(); } @Nonnull diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 0c9c725c1b86..7a14e15a19bd 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ -31,7 +32,6 @@ import java.util.List; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; @@ -63,7 +63,7 @@ protected void createPreRequisities() throws Exception { for (int i = 0; i < 10; i++) { keyName = parent.concat(key + i); OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); + bucketName, keyName, 0L, RatisReplicationConfig.getInstance(THREE), omMetadataManager); ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); omKeyInfoList .add(omMetadataManager.getKeyTable(getBucketLayout()).get(ozoneKey)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java index fd70308c43d1..6a3a709c341c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponseWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -33,13 +33,13 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.hadoop.util.Time; import org.junit.jupiter.api.Test; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.DeleteKeys; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -93,10 +93,11 @@ protected void createPreRequisities() throws Exception { keyName = keyPrefix + i; OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, dirId + 1, buckId, - dirId + 1, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(dirId + 1) + .setParentObjectID(buckId) + .setUpdateID(dirId + 1) + .build(); ozoneDBKey = OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java index 0824f7c33de7..72a76a1aca4f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -32,7 +33,6 @@ import java.util.Map; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -117,7 +117,8 @@ private void createPreRequisities() throws Exception { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); OMRequestTestUtils.addKeyToTable(false, volumeName, - bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, + bucketName, parentDir.concat("/key" + i), 0L, + RatisReplicationConfig.getInstance(THREE), omMetadataManager); OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable(getBucketLayout()) diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java index f4f0e729f05d..c9a4109809ed 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -208,7 +208,7 @@ private Map addOpenKeysToDB(String volume, int numKeys, long parentID = random.nextLong(); OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, key, replicationType, replicationFactor); + bucket, key, replicationConfig).build(); if (keyLength > 0) { OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java index b356dddd6b57..35600c331f3f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3ExpiredMultipartUploadsAbortResponse.java @@ -19,15 +19,19 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartAbortInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -277,10 +281,10 @@ private Map> addMPUsToDB( OmBucketInfo omBucketInfo = OMRequestTestUtils.addBucketToDB(volume, bucket, omMetadataManager, getBucketLayout()); - final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, - bucket, keyName, - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, - 0L, Time.now(), true); + ReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(ONE); + final OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, keyName, replicationConfig, + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .build(); if (getBucketLayout().equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { omKeyInfo.setParentObjectID(omBucketInfo.getObjectID()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index 47aa641c1ebb..e7a570350cff 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -18,14 +18,17 @@ package org.apache.hadoop.ozone.om.response.s3.multipart; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -85,14 +88,16 @@ public void testAddDBToBatch() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); String dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId, - parentID, fileName, clientId); + parentID, fileName, clientId); String dbKey = omMetadataManager.getOzonePathKey(volumeId, bucketId, - parentID, fileName); + parentID, fileName); OmKeyInfo omKeyInfoFSO = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); @@ -175,9 +180,11 @@ public void testAddDBToBatchWithNullBucketInfo() throws Exception { parentID, fileName); OmKeyInfo omKeyInfoFSO = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, - Time.now(), true); + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(objectId) + .setParentObjectID(parentID) + .setUpdateID(txnId) + .build(); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); @@ -244,20 +251,20 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { String keyName = getKeyName(); OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, - omMetadataManager); + omMetadataManager); createParentPath(volumeName, bucketName); // Put an entry to delete table with the same key prior to multipart commit - OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, - bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentID + 8, - parentID, 8, Time.now(), true); + OmKeyInfo prevKey = 
OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 8) + .setParentObjectID(parentID) + .setUpdateID(8) + .build(); RepeatedOmKeyInfo prevKeys = new RepeatedOmKeyInfo(prevKey); String ozoneKey = omMetadataManager - .getOzoneKey(prevKey.getVolumeName(), - prevKey.getBucketName(), prevKey.getFileName()); + .getOzoneKey(prevKey.getVolumeName(), + prevKey.getBucketName(), prevKey.getFileName()); omMetadataManager.getDeletedTable().put(ozoneKey, prevKeys); long oId = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 1); @@ -312,11 +319,12 @@ private long runAddDBToBatchWithParts(String volumeName, omMetadataManager.getBucketTable().get(bucketKey); OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentID + 9, - parentID, 100, Time.now(), true); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)) + .setObjectID(parentID + 9) + .setParentObjectID(parentID) + .setUpdateID(100) + .build(); List unUsedParts = new ArrayList<>(); unUsedParts.add(omKeyInfo); S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java index c8a3faae4cca..8dcb030d637a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java @@ -23,9 +23,10 @@ import java.io.IOException; import java.nio.file.Path; import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.om.KeyManager; @@ -47,6 +48,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -129,10 +131,11 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { for (int i = 0; i < 2000; ++i) { String keyName = "key" + longName + i; OmKeyInfo omKeyInfo = - OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, - keyName, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, dir1.getObjectID() + 1 + i, - dir1.getObjectID(), 100, Time.now()); + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(ONE)) + .setObjectID(dir1.getObjectID() + 1 + i) + .setParentObjectID(dir1.getObjectID()) + .setUpdateID(100L) + .build(); OMRequestTestUtils.addFileToKeyTable(false, true, keyName, omKeyInfo, 1234L, i + 1, om.getMetadataManager()); } 
@@ -143,7 +146,7 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception { .setBucketName(bucketName) .setKeyName("dir" + longName) .setReplicationConfig(StandaloneReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.ONE)) + ONE)) .setDataSize(0).setRecursive(true) .build(); writeClient.deleteKey(delArgs); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java index 5ac7835f8ce6..1a0db1183311 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestQuotaRepairTask.java @@ -19,8 +19,11 @@ package org.apache.hadoop.ozone.om.service; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.junit.jupiter.api.Assertions.assertEquals; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -48,8 +51,7 @@ public void testQuotaRepair() throws Exception { String parentDir = "/user"; for (int i = 0; i < count; i++) { OMRequestTestUtils.addKeyToTableAndCache(volumeName, bucketName, - parentDir.concat("/key" + i), -1, HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE, 150 + i, omMetadataManager); + parentDir.concat("/key" + i), -1, RatisReplicationConfig.getInstance(THREE), 150 + i, omMetadataManager); } String fsoBucketName = "fso" + bucketName; @@ -59,12 +61,13 @@ public void testQuotaRepair() throws Exception { fsoBucketName, "c/d/e", omMetadataManager); for (int i = 0; i < count; i++) { String fileName = "file1" + i; - OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo( - volumeName, fsoBucketName, fileName, - HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.ONE, - parentId + 1 + i, - parentId, 100 + i, Time.now()); + OmKeyInfo omKeyInfo = + OMRequestTestUtils.createOmKeyInfo(volumeName, fsoBucketName, fileName, + RatisReplicationConfig.getInstance(ONE)) + .setObjectID(parentId + 1 + i) + .setParentObjectID(parentId) + .setUpdateID(100L + i) + .build(); omKeyInfo.setKeyName(fileName); OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50 + i, omMetadataManager); From e93e781ffc991571cbd335e338f4d5a8fbe4a223 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 8 Feb 2024 18:50:21 +0100 Subject: [PATCH 004/108] HDDS-10291. 
Set simple properties in MiniOzoneCluster via Configuration (#6186) --- .../org/apache/hadoop/fs/ozone/TestHSync.java | 3 +- .../hadoop/fs/ozone/TestLeaseRecovery.java | 4 +- .../TestOzoneFileSystemWithStreaming.java | 3 +- .../hadoop/fs/ozone/TestOzoneFsHAURLs.java | 1 - ...estSCMContainerPlacementPolicyMetrics.java | 2 +- .../hdds/scm/TestStorageContainerManager.java | 18 +++--- .../hadoop/hdds/scm/node/TestQueryNode.java | 9 +-- .../scm/pipeline/TestLeaderChoosePolicy.java | 9 ++- .../hdds/scm/pipeline/TestNodeFailure.java | 5 +- .../TestRatisPipelineCreateAndDestroy.java | 8 ++- .../hdds/scm/pipeline/TestSCMRestart.java | 11 ++-- .../hdds/scm/storage/TestCommitWatcher.java | 1 - .../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 19 ++++--- .../hdds/upgrade/TestScmHAFinalization.java | 4 +- .../apache/hadoop/ozone/MiniOzoneCluster.java | 55 ------------------- .../hadoop/ozone/MiniOzoneClusterImpl.java | 42 +------------- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 11 ---- .../ozone/client/TestOzoneClientFactory.java | 3 +- .../rpc/AbstractTestECKeyOutputStream.java | 7 ++- .../client/rpc/Test2WayCommitInRatis.java | 3 - .../hadoop/ozone/client/rpc/TestBCSID.java | 4 +- .../client/rpc/TestBlockDataStreamOutput.java | 1 - .../client/rpc/TestBlockOutputStream.java | 1 - .../rpc/TestContainerReplicationEndToEnd.java | 4 +- .../client/rpc/TestContainerStateMachine.java | 3 +- ...estContainerStateMachineFailureOnRead.java | 4 +- .../TestContainerStateMachineFailures.java | 4 +- .../TestContainerStateMachineFlushDelay.java | 3 +- .../rpc/TestContainerStateMachineStream.java | 10 ++-- .../rpc/TestDeleteWithInAdequateDN.java | 10 ++-- .../rpc/TestFailureHandlingByClient.java | 3 +- ...TestFailureHandlingByClientFlushDelay.java | 2 +- .../rpc/TestHybridPipelineOnDatanode.java | 4 +- .../TestMultiBlockWritesWithDnFailures.java | 1 - ...TestOzoneClientMultipartUploadWithFSO.java | 3 +- ...oneClientRetriesOnExceptionFlushDelay.java | 1 - .../TestOzoneClientRetriesOnExceptions.java | 1 - .../rpc/TestOzoneRpcClientAbstract.java | 3 +- .../rpc/TestValidateBCSIDOnRestart.java | 5 +- .../client/rpc/read/TestInputStreamBase.java | 2 +- .../container/TestECContainerRecovery.java | 5 +- .../commandhandler/TestBlockDeletion.java | 1 - .../TestCloseContainerByPipeline.java | 2 +- .../ozone/dn/ratis/TestDnRatisLogParser.java | 4 +- .../hadoop/ozone/freon/TestDataValidate.java | 5 +- .../TestFreonWithDatanodeFastRestart.java | 4 +- .../freon/TestFreonWithPipelineDestroy.java | 5 +- .../hadoop/ozone/om/TestKeyPurging.java | 3 +- .../ozone/om/TestOMBucketLayoutUpgrade.java | 3 +- .../ozone/om/TestOMUpgradeFinalization.java | 3 +- .../hadoop/ozone/om/TestScmSafeMode.java | 9 +-- .../om/multitenant/TestMultiTenantVolume.java | 5 +- .../ozone/om/snapshot/TestOmSnapshot.java | 3 +- .../src/test/resources/ozone-site.xml | 15 +++++ 54 files changed, 148 insertions(+), 206 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index 05d297d38ed8..d2e2ea6f7ef9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -77,6 +77,7 @@ import org.slf4j.event.Level; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static 
org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -117,6 +118,7 @@ public static void init() throws Exception { CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); CONF.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); + CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) @@ -129,7 +131,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index ca68aad45515..ffd54cfea869 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -43,6 +43,7 @@ import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -92,6 +93,8 @@ public void init() throws IOException, InterruptedException, conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); + ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) .setChunkSize(chunkSize) @@ -104,7 +107,6 @@ public void init() throws IOException, InterruptedException, cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 2a6c8c456b9c..6ec6a32d4fba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -44,6 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; @@ -87,6 +88,7 @@ public static void init() throws Exception { CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); + CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); 
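A minimal sketch of the HDDS-10291 pattern that repeats through the hunks below (not part of the patch; it mirrors the 5-datanode TestHSync setup above and assumes the static import of OZONE_SCM_RATIS_PIPELINE_LIMIT from ScmConfigKeys):

    // Previously expressed through the builder:
    //   MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).setTotalPipelineNumLimit(10).build();
    // The pipeline cap is now a plain configuration property set before the build.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);

    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(5)
        .build();
    cluster.waitForClusterToBeReady();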
ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -100,7 +102,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(CONF) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 47dc9ac0c3ba..98b87d9d3031 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -109,7 +109,6 @@ static void initClass(@TempDir File tempDir) throws Exception { // Start the cluster cluster = MiniOzoneCluster.newOMHABuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java index 90f8375f829b..4ac44315556c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -85,9 +85,9 @@ public void setup() throws Exception { StaticMapping.addNodeToRack(NetUtils.normalizeHostNames( Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(4) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index e973c842de44..f64736ac88a8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -291,7 +291,6 @@ public void testBlockDeletionTransactions() throws Exception { numKeys); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); @@ -371,10 +370,11 @@ public void testBlockDeletionTransactions() throws Exception { @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setHbInterval(1000) - .setHbProcessorInterval(3000).setNumDatanodes(1) - .build(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); + MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build(); cluster.waitForClusterToBeReady(); try { @@ -462,10 +462,10 @@ public void testBlockDeletingThrottling() throws Exception { 
conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); @@ -826,10 +826,10 @@ public void testCloseContainerCommandOnRestart() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(3000) .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java index e8dc7455a11c..683a0c176eb9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/node/TestQueryNode.java @@ -32,11 +32,9 @@ import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -50,7 +48,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -65,11 +62,7 @@ public class TestQueryNode { @BeforeEach public void setUp() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - final int interval = 1000; - conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - interval, TimeUnit.MILLISECONDS); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS); @@ -77,10 +70,10 @@ public void setUp() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + numOfDatanodes / 2); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit(numOfDatanodes + 
numOfDatanodes / 2) .build(); cluster.waitForClusterToBeReady(); scmClient = new ContainerOperationClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index 439b563d6330..51b5d84a13e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -37,9 +37,12 @@ import java.util.Map; import java.util.UUID; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -56,12 +59,12 @@ public class TestLeaderChoosePolicy { public void init(int numDatanodes, int datanodePipelineLimit) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, datanodePipelineLimit); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) - .setHbInterval(2000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java index c73ffb982cf6..d8840436ee0b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNodeFailure.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.pipeline; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -61,11 +62,11 @@ public static void init() throws Exception { conf.setFromObject(ratisServerConfig); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.set(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, "2s"); + conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(6) - .setHbInterval(1000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 443105b6ccb6..829a9581f663 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -38,8 +38,10 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -55,14 +57,14 @@ public class TestRatisPipelineCreateAndDestroy { public void init(int numDatanodes) throws Exception { conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, numDatanodes + numDatanodes / 3); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 2000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, 500, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) - .setHbInterval(2000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java index 988f163adab5..6ce05ad3be74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMRestart.java @@ -35,6 +35,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotSame; @@ -60,17 +61,17 @@ public class TestSCMRestart { */ @BeforeAll public static void init() throws Exception { + final int numOfNodes = 4; conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - int numOfNodes = 4; + // allow only one FACTOR THREE pipeline. + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfNodes + 1); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfNodes) - // allow only one FACTOR THREE pipeline. 
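The heartbeat-related builder knobs are migrated the same way in these files; a sketch only, not part of the patch, with the millisecond values taken from the TestSCMRestart hunk here and TimeUnit plus the ScmConfigKeys/HddsConfigKeys imports assumed from the surrounding test:

    // Previously: .setHbInterval(1000).setHbProcessorInterval(1000) on the builder.
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
        1000, TimeUnit.MILLISECONDS);

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(numOfNodes)
        .build();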
- .setTotalPipelineNumLimit(numOfNodes + 1) - .setHbInterval(1000) - .setHbProcessorInterval(1000) .build(); cluster.waitForClusterToBeReady(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 563e0162acc6..1363dc2269a4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -144,7 +144,6 @@ public void init() throws Exception { StorageUnit.MB); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 57e807b7c751..199b4b63ff74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -20,12 +20,17 @@ import static java.lang.Thread.sleep; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_COMPLETE_FINALIZATION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_POST_FINALIZE_UPGRADE; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_PRE_FINALIZE_UPGRADE; @@ -148,7 +153,13 @@ public static void initClass() { OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.set(OZONE_DATANODE_PIPELINE_LIMIT, "1"); + conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); + // allow only one FACTOR THREE pipeline. 
+ conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, NUM_DATA_NODES + 1); + conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 500, TimeUnit.MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, TimeUnit.MILLISECONDS); scmFinalizationExecutor = new InjectedUpgradeFinalizationExecutor<>(); SCMConfigurator scmConfigurator = new SCMConfigurator(); @@ -159,12 +170,6 @@ public static void initClass() { .setNumDatanodes(NUM_DATA_NODES) .setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) - // allow only one FACTOR THREE pipeline. - .setTotalPipelineNumLimit(NUM_DATA_NODES + 1) - .setHbInterval(500) - .setHbProcessorInterval(500) - .setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()) - .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); // Setting the provider to a max of 100 clusters. Some of the tests here diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index d2ae30efcebc..aa9f561aa02b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -55,6 +55,7 @@ import java.util.stream.Stream; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -87,11 +88,12 @@ public void init(OzoneConfiguration conf, SCMConfigurator configurator = new SCMConfigurator(); configurator.setUpgradeFinalizationExecutor(executor); + conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + MiniOzoneCluster.Builder clusterBuilder = new MiniOzoneHAClusterImpl.Builder(conf) .setNumOfStorageContainerManagers(NUM_SCMS) .setNumOfActiveSCMs(NUM_SCMS - numInactiveSCMs) - .setScmLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .setSCMServiceId("scmservice") .setSCMConfigurator(configurator) .setNumOfOzoneManagers(1) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index e864cae00b37..667f7448a1bb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -297,11 +297,8 @@ default String getBaseDir() { @SuppressWarnings("visibilitymodifier") abstract class Builder { - protected static final int DEFAULT_HB_INTERVAL_MS = 1000; - protected static final int DEFAULT_HB_PROCESSOR_INTERVAL_MS = 100; protected static final int ACTIVE_OMS_NOT_SET = -1; protected static final int ACTIVE_SCMS_NOT_SET = -1; - protected static final int DEFAULT_PIPELINE_LIMIT = 3; protected 
static final int DEFAULT_RATIS_RPC_TIMEOUT_SEC = 1; protected OzoneConfiguration conf; @@ -317,17 +314,12 @@ abstract class Builder { protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; protected SCMConfigurator scmConfigurator; - protected Optional hbInterval = Optional.empty(); - protected Optional hbProcessorInterval = Optional.empty(); protected String scmId = UUID.randomUUID().toString(); protected String omId = UUID.randomUUID().toString(); protected Optional datanodeReservedSpace = Optional.empty(); protected boolean includeRecon = false; - - protected Optional omLayoutVersion = Optional.empty(); - protected Optional scmLayoutVersion = Optional.empty(); protected Optional dnLayoutVersion = Optional.empty(); protected int numOfDatanodes = 3; @@ -335,7 +327,6 @@ abstract class Builder { protected boolean startDataNodes = true; protected CertificateClient certClient; protected SecretKeyClient secretKeyClient; - protected int pipelineNumLimit = DEFAULT_PIPELINE_LIMIT; protected Builder(OzoneConfiguration conf) { this.conf = conf; @@ -424,42 +415,6 @@ public Builder setNumDataVolumes(int val) { return this; } - /** - * Sets the total number of pipelines to create. - * @param val number of pipelines - * @return MiniOzoneCluster.Builder - */ - public Builder setTotalPipelineNumLimit(int val) { - pipelineNumLimit = val; - return this; - } - - /** - * Sets the number of HeartBeat Interval of Datanodes, the value should be - * in MilliSeconds. - * - * @param val HeartBeat interval in milliseconds - * - * @return MiniOzoneCluster.Builder - */ - public Builder setHbInterval(int val) { - hbInterval = Optional.of(val); - return this; - } - - /** - * Sets the number of HeartBeat Processor Interval of Datanodes, - * the value should be in MilliSeconds. - * - * @param val HeartBeat Processor interval in milliseconds - * - * @return MiniOzoneCluster.Builder - */ - public Builder setHbProcessorInterval(int val) { - hbProcessorInterval = Optional.of(val); - return this; - } - /** * Sets the reserved space * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys} @@ -513,16 +468,6 @@ public Builder setSCMServiceId(String serviceId) { return this; } - public Builder setScmLayoutVersion(int layoutVersion) { - scmLayoutVersion = Optional.of(layoutVersion); - return this; - } - - public Builder setOmLayoutVersion(int layoutVersion) { - omLayoutVersion = Optional.of(layoutVersion); - return this; - } - public Builder setDnLayoutVersion(int layoutVersion) { dnLayoutVersion = Optional.of(layoutVersion); return this; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 400ae3ee2cc8..dd9b83e66db6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -81,19 +81,16 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.commons.io.FileUtils; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static 
org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; @@ -652,10 +649,6 @@ protected void initializeConfiguration() throws IOException { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - // MiniOzoneCluster should have global pipeline upper limit. - conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, - pipelineNumLimit >= DEFAULT_PIPELINE_LIMIT ? - pipelineNumLimit : DEFAULT_PIPELINE_LIMIT); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, DEFAULT_RATIS_RPC_TIMEOUT_SEC, TimeUnit.SECONDS); SCMClientConfig scmClientConfig = conf.getObject(SCMClientConfig.class); @@ -681,14 +674,8 @@ void removeConfiguration() { protected StorageContainerManager createSCM() throws IOException, AuthenticationException { configureSCM(); - SCMStorageConfig scmStore; - // Set non standard layout version if needed. 
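The non-standard layout version handling removed here follows the same pattern: instead of builder methods, upgrade tests seed the initial metadata layout directly in the configuration, on the assumption that SCM and OM storage initialization pick the keys up from that configuration. A sketch (not part of the patch) mirroring the TestHDDSUpgrade hunk above, with the static config-key imports assumed:

    // Previously: .setScmLayoutVersion(...) and .setOmLayoutVersion(...) on the builder.
    conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION,
        HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
    conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION,
        OMLayoutFeature.INITIAL_VERSION.layoutVersion());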
- scmLayoutVersion.ifPresent(integer -> - conf.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - - scmStore = new SCMStorageConfig(conf); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); StorageContainerManager scm = HddsTestUtils.getScmSimple(conf, scmConfigurator); @@ -745,9 +732,6 @@ void initializeOmStorage(OMStorage omStorage) throws IOException { protected OzoneManager createOM() throws IOException, AuthenticationException { configureOM(); - omLayoutVersion.ifPresent(integer -> - conf.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); OMStorage omStore = new OMStorage(conf); initializeOmStorage(omStore); return OzoneManager.createOm(conf); @@ -850,30 +834,6 @@ protected void configureSCM() { localhostWithFreePort()); conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "3s"); - configureSCMheartbeat(); - } - - private void configureSCMheartbeat() { - if (hbInterval.isPresent()) { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - hbInterval.get(), TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, - DEFAULT_HB_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } - - if (hbProcessorInterval.isPresent()) { - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - hbProcessorInterval.get(), - TimeUnit.MILLISECONDS); - } else { - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, - DEFAULT_HB_PROCESSOR_INTERVAL_MS, - TimeUnit.MILLISECONDS); - } } private void configureOM() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 797a7515f206..ceade72e7d4d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -56,8 +56,6 @@ import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; -import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; -import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; @@ -487,11 +485,6 @@ protected OMHAService createOMService() throws IOException, String metaDirPath = path + "/" + nodeId; config.set(OZONE_METADATA_DIRS, metaDirPath); - // Set non standard layout version if needed. 
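With the OM-side removal below mirroring this SCM-side one, the builder is left carrying only structural choices such as node counts, service IDs and configurators, while every tunable touched by this patch becomes a plain configuration property, with defaults presumably supplied by the entries added to src/test/resources/ozone-site.xml per the diffstat above. A consolidated sketch, not part of the patch and assuming a simple non-HA test setup:

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
    conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS);

    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)
        .build();
    cluster.waitForClusterToBeReady();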
- omLayoutVersion.ifPresent(integer -> - config.set(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - OzoneManager.omInit(config); OzoneManager om = OzoneManager.createOm(config); if (certClient != null) { @@ -555,10 +548,6 @@ protected SCMHAService createSCMService() scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId); scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - scmLayoutVersion.ifPresent(integer -> - scmConfig.set(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, - String.valueOf(integer))); - configureSCM(); if (i == 1) { StorageContainerManager.scmInit(scmConfig, clusterId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java index f2a079ca80ca..e2a15595b553 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.client; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -36,10 +37,10 @@ public class TestOzoneClientFactory { public void testRemoteException() { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); Exception e = assertThrows(Exception.class, () -> { MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setTotalPipelineNumLimit(10) .build(); String omPort = cluster.getOzoneManager().getRpcPort(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java index 0b0b2586c9e2..b40b0bbcc626 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/AbstractTestECKeyOutputStream.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; @@ -121,6 +122,7 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { TimeUnit.SECONDS); conf.setBoolean(OzoneConfigKeys.OZONE_EC_GRPC_ZERO_COPY_ENABLED, zeroCopyEnabled); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -129,8 +131,9 @@ protected static void init(boolean zeroCopyEnabled) throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10).build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) + .build(); cluster.waitForClusterToBeReady(); client = 
OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java index e7c8be170ca1..d7ce08338db8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/Test2WayCommitInRatis.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; @@ -85,8 +84,6 @@ private void startCluster(OzoneConfiguration conf) throws Exception { blockSize = 2 * maxFlushSize; // Make sure the pipeline does not get destroyed quickly - conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, - 60, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 60000, TimeUnit.SECONDS); DatanodeRatisServerConfig ratisServerConfig = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index c0ae49f3bf41..d668bb4b6522 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -49,6 +49,7 @@ HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys. 
HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -84,10 +85,11 @@ public static void init() throws Exception { conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 8bb791bb103e..ea1b16b0483a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -105,7 +105,6 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 1e9cefbaa481..e15e1e4d63ba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -115,7 +115,6 @@ static MiniOzoneCluster createCluster() throws IOException, MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index fe08b9e0f4ba..78a4e78647eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -59,6 +59,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; @@ -115,11 +116,12 @@ public static void init() throws Exception { 
replicationConf.setInterval(Duration.ofMillis(containerReportInterval)); conf.setFromObject(replicationConf); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 6); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(4) - .setTotalPipelineNumLimit(6).setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); cluster.getStorageContainerManager().getReplicationManager().start(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 84b7579cd01d..563904922e9b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -53,6 +53,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -92,6 +93,7 @@ public void setup() throws Exception { baseDir.mkdirs(); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -113,7 +115,6 @@ public void setup() throws Exception { // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index 1d0f25b3a041..eea068a8742f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -51,6 +51,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -94,6 +95,8 @@ public void setup() throws Exception { 
conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); + DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000)); @@ -118,7 +121,6 @@ public void setup() throws Exception { conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 2c11177e5eaf..4588a86a48c7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -83,6 +83,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.QUASI_CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; @@ -143,6 +144,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); @@ -169,7 +171,7 @@ public static void init() throws Exception { conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10).setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 23ab89b80c65..bf41df6c7878 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -56,6 +56,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -101,6 +102,7 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); @@ -119,7 +121,6 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) - .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) .setSecretKeyClient(new SecretKeyTestClient()) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index 97a3047bfdb0..be27dab58ed0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -89,6 +90,7 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); @@ -123,11 +125,9 @@ public void setup() throws Exception { conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); - cluster = - MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setHbInterval(200) - .build(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index fa50dac64f7e..d4ff85736273 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -65,6 +65,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; @@ -101,15 +102,20 @@ public class TestDeleteWithInAdequateDN { */ @BeforeAll public static void init() throws Exception { + final int numOfDatanodes = 3; + conf = new OzoneConfiguration(); path = GenericTestUtils .getTempPath(TestContainerStateMachineFailures.class.getSimpleName()); File baseDir = new File(path); baseDir.mkdirs(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, + TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT); // Make the stale, dead and server failure timeout higher so that a dead // node is not detecte at SCM as well as the pipeline close action // never gets initiated early at Datanode in the test. @@ -156,12 +162,8 @@ public static void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setQuietMode(false); - int numOfDatanodes = 3; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numOfDatanodes) - .setTotalPipelineNumLimit( - numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT) - .setHbInterval(100) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(THREE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index d1b20de88a86..4e0508792959 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -116,6 +116,7 @@ private void init() throws Exception { conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -139,7 +140,7 @@ private void init() throws Exception { Collections.singleton(HddsUtils.getHostName(conf))).get(0), "/rack1"); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(10).setTotalPipelineNumLimit(15).build(); + .setNumDatanodes(10).build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index fadc06bd57bd..a385edd0275c 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -112,6 +112,7 @@ private void init() throws Exception { conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -140,7 +141,6 @@ private void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index 4ccdd0e2d4b3..51ebf3fa0ccd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -44,6 +44,7 @@ import java.io.IOException; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -73,8 +74,9 @@ public class TestHybridPipelineOnDatanode { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3) - .setTotalPipelineNumLimit(5).build(); + .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index b7b52d389bc3..ed00686bd8a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -112,7 +112,6 @@ private void startCluster(int datanodes) throws Exception { conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(datanodes) - .setTotalPipelineNumLimit(0) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index a89e61769966..268a192640c6 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -78,6 +78,7 @@ import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -114,6 +115,7 @@ public class TestOzoneClientMultipartUploadWithFSO { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); } @@ -135,7 +137,6 @@ public static void shutdown() throws IOException { static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index c3e8a8d461b8..ad59621e0c75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -109,7 +109,6 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index cd99382f300b..3e1667a38a68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -118,7 +118,6 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(3) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index a87d05321e27..98bf65ad6b6f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -129,6 +129,7 @@ import static 
org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OmUtils.LOG; import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; @@ -203,6 +204,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { // Reduce long wait time in MiniOzoneClusterImpl#waitForHddsDatanodesStop // for testZReadKeyWithUnhealthyContainerReplica. conf.set("ozone.scm.stale.node.interval", "10s"); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.MB) .setDataStreamMinPacketSize(1) @@ -210,7 +212,6 @@ static void startCluster(OzoneConfiguration conf) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(14) - .setTotalPipelineNumLimit(10) .build(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index a8029987fedd..1e22613f929b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -57,6 +57,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -100,6 +101,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS); @@ -125,8 +127,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2). 
- setHbInterval(200) + MiniOzoneCluster.newBuilder(conf).setNumDatanodes(2) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 9f5d04c56f94..256148dfb8de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -53,6 +53,7 @@ protected static MiniOzoneCluster newCluster( conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 5); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); @@ -73,7 +74,6 @@ protected static MiniOzoneCluster newCluster( return MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setTotalPipelineNumLimit(5) .build(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java index e045b48bda96..24064ae5c883 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestECContainerRecovery.java @@ -140,6 +140,7 @@ public static void init() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) .setBlockSize(blockSize) @@ -148,8 +149,8 @@ public static void init() throws Exception { .setStreamBufferMaxSize(maxFlushSize) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) - .setTotalPipelineNumLimit(10) + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(10) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index d5564ac2315e..5ff8d713649e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -180,7 +180,6 @@ public void init() throws Exception { conf.setFromObject(replicationConf); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(50) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index cd25ee25c8f4..8d22eddadc59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -85,10 +85,10 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1"); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 15); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(10) - .setTotalPipelineNumLimit(15) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index cca47e17e407..8e95e6cb18a7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -35,6 +35,7 @@ import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.assertj.core.api.Assertions.assertThat; /** @@ -50,8 +51,9 @@ public class TestDnRatisLogParser { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 2); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).setTotalPipelineNumLimit(2).build(); + .setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); System.setOut(new PrintStream(out, false, UTF_8.name())); System.setErr(new PrintStream(err, false, UTF_8.name())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java index 28cc863c26d5..e1f2061c7d46 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDataValidate.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.junit.jupiter.api.Test; import picocli.CommandLine; @@ -52,8 +53,10 @@ static void startCluster(OzoneConfiguration conf) throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); conf.setFromObject(raftClientConfig); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); + cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5).setTotalPipelineNumLimit(8).build(); + .setNumDatanodes(5).build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, 180000); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java index 0798731a839d..862b52c8e9e1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeFastRestart.java @@ -34,6 +34,7 @@ import picocli.CommandLine; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -54,9 +55,8 @@ public class TestFreonWithDatanodeFastRestart { @BeforeAll public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); cluster = MiniOzoneCluster.newBuilder(conf) - .setHbProcessorInterval(1000) - .setHbInterval(1000) .setNumDatanodes(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index d78beff7e78b..08c1b3bd3b35 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -61,6 +61,8 @@ public static void init() throws Exception { 1, TimeUnit.SECONDS); conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 1, TimeUnit.SECONDS); + conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, TimeUnit.MILLISECONDS); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 8); DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class); ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); @@ -74,10 +76,7 @@ public static void init() throws Exception { conf.setFromObject(raftClientConfig); cluster = MiniOzoneCluster.newBuilder(conf) - .setHbProcessorInterval(1000) - .setHbInterval(1000) .setNumDatanodes(3) - .setTotalPipelineNumLimit(8) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index 83eac0ab288b..e3bb5b5bccb8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -45,6 +45,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -67,6 +68,7 @@ public class TestKeyPurging { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 200, TimeUnit.MILLISECONDS); 
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -77,7 +79,6 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) - .setHbInterval(200) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index b8e115864727..58d19d846d4a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -50,6 +50,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION_PRIOR_FINALIZATION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; @@ -91,12 +92,12 @@ class TestOMBucketLayoutUpgrade { @BeforeAll void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, fromLayoutVersion); String omServiceId = UUID.randomUUID().toString(); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) .setNumDatanodes(1) - .setOmLayoutVersion(fromLayoutVersion) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index fa84130c9d6f..22358cbe6bb7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.LAYOUT_VERSION_KEY; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.assertClusterPrepared; import static org.apache.hadoop.ozone.om.OMUpgradeTestUtils.waitForFinalization; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager.maxLayoutVersion; import static org.apache.ozone.test.GenericTestUtils.waitFor; @@ -103,11 +104,11 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, INITIAL_VERSION.layoutVersion()); return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) .setNumDatanodes(1) - .setOmLayoutVersion(INITIAL_VERSION.layoutVersion()) .build(); } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 14b1a30b44f1..2f06304bd1e3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -56,9 +56,12 @@ import java.util.List; import java.util.Map; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -97,9 +100,9 @@ public void init() throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_STALENODE_INTERVAL, "10s"); conf.set(OZONE_SCM_DEADNODE_INTERVAL, "25s"); + conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, MILLISECONDS); + conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 500, MILLISECONDS); builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) .setStartDataNodes(false); cluster = builder.build(); cluster.startHddsDatanodes(); @@ -323,8 +326,6 @@ public void testSCMSafeModeDisabled() throws Exception { conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, 3); builder = MiniOzoneCluster.newBuilder(conf) - .setHbInterval(1000) - .setHbProcessorInterval(500) .setNumDatanodes(3); cluster = builder.build(); StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java index 1cb436dcb38d..078266581cbc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java @@ -49,6 +49,7 @@ import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -76,9 +77,9 @@ public static void initClusterProvider() throws Exception { conf.setBoolean( OMMultiTenantManagerImpl.OZONE_OM_TENANT_DEV_SKIP_RANGER, true); conf.setBoolean(OZONE_OM_MULTITENANCY_ENABLED, true); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.INITIAL_VERSION.layoutVersion()); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) - .withoutDatanodes() - 
.setOmLayoutVersion(OMLayoutFeature.INITIAL_VERSION.layoutVersion()); + .withoutDatanodes(); cluster = builder.build(); client = cluster.newClient(); s3VolumeName = HddsClientUtils.getDefaultS3VolumeName(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 029b0813bb55..587ae18f8627 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -119,6 +119,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; +import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; @@ -207,10 +208,10 @@ private void init() throws Exception { conf.setEnum(HDDS_DB_PROFILE, DBProfile.TEST); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); cluster = MiniOzoneCluster.newBuilder(conf) .setNumOfOzoneManagers(3) - .setOmLayoutVersion(OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml index 4e79ae97fc24..21a7715305f8 100644 --- a/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml +++ b/hadoop-ozone/integration-test/src/test/resources/ozone-site.xml @@ -56,6 +56,21 @@ true + + + hdds.heartbeat.interval + 1s + + + ozone.scm.heartbeat.thread.interval + 100ms + + + + ozone.scm.ratis.pipeline.limit + 3 + + ozone.scm.close.container.wait.duration 1s From 5715aee57168698a78075aa12f01c0d57b5b1f7d Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 8 Feb 2024 12:26:34 -0800 Subject: [PATCH 005/108] HDDS-10328. Support cross realm Kerberos out of box. (#6192) --- hadoop-hdds/common/src/main/resources/ozone-default.xml | 8 ++++++++ .../java/org/apache/hadoop/ozone/om/OMConfigKeys.java | 2 ++ 2 files changed, 10 insertions(+) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 094fbff16da7..bfb0547caf60 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2226,6 +2226,14 @@ OZONE, SECURITY, KERBEROS The OzoneManager service principal. Ex om/_HOST@REALM.COM + + ozone.om.kerberos.principal.pattern + * + + A client-side RegEx that can be configured to control + allowed realms to authenticate with (useful in cross-realm env.) 
+ + ozone.om.http.auth.kerberos.principal HTTP/_HOST@REALM diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 5dd7579eb916..faa5096baf98 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -289,6 +289,8 @@ private OMConfigKeys() { + "kerberos.keytab.file"; public static final String OZONE_OM_KERBEROS_PRINCIPAL_KEY = "ozone.om" + ".kerberos.principal"; + public static final String OZONE_OM_KERBEROS_PRINCIPAL_PATTERN_KEY = + "ozone.om.kerberos.principal.pattern"; public static final String OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE = "ozone.om.http.auth.kerberos.keytab"; public static final String OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY From 601fd413a02ba60bc621a2bb000f8261b35d2829 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 8 Feb 2024 22:57:40 +0100 Subject: [PATCH 006/108] HDDS-10322. Make VolumeArgs immutable (#6193) --- .../hadoop/ozone/client/VolumeArgs.java | 47 +++++++++---------- .../hadoop/ozone/client/rpc/RpcClient.java | 5 +- .../AbstractRootedOzoneFileSystemTest.java | 20 ++++---- .../snapshot/TestOzoneManagerSnapshotAcl.java | 2 +- .../hadoop/ozone/client/ObjectStoreStub.java | 3 +- 5 files changed, 34 insertions(+), 43 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java index 9d683c5393c2..a1c9cd55bb3f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/VolumeArgs.java @@ -18,10 +18,13 @@ package org.apache.hadoop.ozone.client; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -36,8 +39,8 @@ public final class VolumeArgs { private final String owner; private final long quotaInBytes; private final long quotaInNamespace; - private final List acls; - private Map metadata; + private final ImmutableList acls; + private final ImmutableMap metadata; /** * Private constructor, constructed via builder. @@ -58,8 +61,8 @@ private VolumeArgs(String admin, this.owner = owner; this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; - this.acls = acls; - this.metadata = metadata; + this.acls = acls == null ? ImmutableList.of() : ImmutableList.copyOf(acls); + this.metadata = metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(metadata); } /** @@ -107,34 +110,20 @@ public List getAcls() { return acls; } - /** - * Returns new builder class that builds a OmVolumeArgs. - * - * @return Builder - */ public static VolumeArgs.Builder newBuilder() { return new VolumeArgs.Builder(); } /** - * Builder for OmVolumeArgs. + * Builder for VolumeArgs. */ - @SuppressWarnings("checkstyle:hiddenfield") public static class Builder { private String adminName; private String ownerName; - private long quotaInBytes; - private long quotaInNamespace; - private List listOfAcls; - private Map metadata = new HashMap<>(); - - /** - * Constructs a builder. 
- */ - public Builder() { - quotaInBytes = OzoneConsts.QUOTA_RESET; - quotaInNamespace = OzoneConsts.QUOTA_RESET; - } + private long quotaInBytes = OzoneConsts.QUOTA_RESET; + private long quotaInNamespace = OzoneConsts.QUOTA_RESET; + private List acls; + private Map metadata; public VolumeArgs.Builder setAdmin(String admin) { this.adminName = admin; @@ -157,12 +146,18 @@ public VolumeArgs.Builder setQuotaInNamespace(long quota) { } public VolumeArgs.Builder addMetadata(String key, String value) { + if (metadata == null) { + metadata = new HashMap<>(); + } metadata.put(key, value); return this; } - public VolumeArgs.Builder setAcls(List acls) + public VolumeArgs.Builder addAcl(OzoneAcl acl) throws IOException { - this.listOfAcls = acls; + if (acls == null) { + acls = new ArrayList<>(); + } + acls.add(acl); return this; } @@ -172,7 +167,7 @@ public VolumeArgs.Builder setAcls(List acls) */ public VolumeArgs build() { return new VolumeArgs(adminName, ownerName, quotaInBytes, - quotaInNamespace, listOfAcls, metadata); + quotaInNamespace, acls, metadata); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 7e1e6fe45602..8343b8740169 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -430,8 +430,9 @@ public void createVolume(String volumeName, VolumeArgs volArgs) userGroups.stream().forEach((group) -> listOfAcls.add( new OzoneAcl(ACLIdentityType.GROUP, group, groupRights, ACCESS))); //ACLs from VolumeArgs - if (volArgs.getAcls() != null) { - listOfAcls.addAll(volArgs.getAcls()); + List volumeAcls = volArgs.getAcls(); + if (volumeAcls != null) { + listOfAcls.addAll(volumeAcls); } OmVolumeArgs.Builder builder = OmVolumeArgs.newBuilder(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index cb33970d8712..8ee82633d59a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -1185,18 +1185,14 @@ void testSharedTmpDir() throws IOException { BitSet aclRights = new BitSet(); aclRights.set(READ.ordinal()); aclRights.set(WRITE.ordinal()); - List objectAcls = new ArrayList<>(); - objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", - aclRights, ACCESS)); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); // volume acls have all access to admin and read+write access to world // Construct VolumeArgs - VolumeArgs volumeArgs = new VolumeArgs.Builder() + VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setAdmin("admin") .setOwner("admin") - .setAcls(Collections.unmodifiableList(objectAcls)) + .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", aclRights, ACCESS)) + .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, ACCESS)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); // Sanity check @@ -1227,7 +1223,7 @@ void testSharedTmpDir() throws IOException { } // set acls for shared tmp mount under the tmp volume - objectAcls.clear(); + List objectAcls = new ArrayList<>(); objectAcls.add(new 
OzoneAcl(ACLIdentityType.USER, "admin", userRights, ACCESS)); aclRights.clear(DELETE.ordinal()); @@ -1302,8 +1298,8 @@ void testTempMount() throws IOException { OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", userRights, ACCESS); // Construct VolumeArgs - VolumeArgs volumeArgs = new VolumeArgs.Builder() - .setAcls(Collections.singletonList(aclWorldAccess)) + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .addAcl(aclWorldAccess) .setQuotaInNamespace(1000).build(); // Sanity check assertNull(volumeArgs.getOwner()); @@ -2303,8 +2299,8 @@ void testNonPrivilegedUserMkdirCreateBucket() throws IOException { OzoneAcl aclWorldAccess = new OzoneAcl(ACLIdentityType.WORLD, "", userRights, ACCESS); // Construct VolumeArgs, set ACL to world access - VolumeArgs volumeArgs = new VolumeArgs.Builder() - .setAcls(Collections.singletonList(aclWorldAccess)) + VolumeArgs volumeArgs = VolumeArgs.newBuilder() + .addAcl(aclWorldAccess) .build(); proxy.createVolume(volume, volumeArgs); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index 8c0b375c3ca9..e9b7e59b4fd6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -630,7 +630,7 @@ private void createBucket(BucketLayout bucketLayout, private void createVolume() throws IOException { final String volumePrefix = "volume-"; volumeName = volumePrefix + RandomStringUtils.randomNumeric(32); - final VolumeArgs volumeArgs = new VolumeArgs.Builder() + final VolumeArgs volumeArgs = VolumeArgs.newBuilder() .setAdmin(ADMIN) .setOwner(ADMIN) .build(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java index b79e49f834cb..e9fb15e613fe 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ObjectStoreStub.java @@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.client; import java.io.IOException; -import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -62,7 +61,7 @@ public void createVolume(String volumeName) throws IOException { .setAdmin("root") .setOwner("root") .setQuotaInBytes(Integer.MAX_VALUE) - .setAcls(new ArrayList<>()).build()); + .build()); } @Override From c1efa330d338d586cbe3a10886f9ce936b5b77df Mon Sep 17 00:00:00 2001 From: Arafat2198 <98023601+ArafatKhan2198@users.noreply.github.com> Date: Fri, 9 Feb 2024 07:56:44 +0530 Subject: [PATCH 007/108] HDDS-8627. 
Recon - API for Count of deletePending directories (#5037) --- .../ozone/recon/api/OMDBInsightEndpoint.java | 31 ++ .../tasks/DeletedKeysInsightHandler.java | 147 ++++++ .../ozone/recon/tasks/OmTableHandler.java | 131 +++++ .../ozone/recon/tasks/OmTableInsightTask.java | 321 ++++--------- .../recon/tasks/OpenKeysInsightHandler.java | 163 +++++++ .../recon/OMMetadataManagerTestUtils.java | 18 +- .../hadoop/ozone/recon/api/TestEndpoints.java | 15 +- .../recon/tasks/TestOmTableInsightTask.java | 448 ++++++++++++++---- 8 files changed, 925 insertions(+), 349 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java index 84f55749a68f..baa9c522be10 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java @@ -58,6 +58,7 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY; @@ -652,6 +653,36 @@ public Response getDeletedDirInfo( return Response.ok(deletedDirInsightInfo).build(); } + /** + * Retrieves the summary of deleted directories. + * + * This method calculates and returns a summary of deleted directories. + * @return The HTTP response body includes a map with the following entries: + * - "totalDeletedDirectories": the total number of deleted directories + * + * Example response: + * { + * "totalDeletedDirectories": 8, + * } + */ + @GET + @Path("/deletePending/dirs/summary") + public Response getDeletedDirectorySummary() { + Map dirSummary = new HashMap<>(); + // Create a keys summary for deleted directories + createSummaryForDeletedDirectories(dirSummary); + return Response.ok(dirSummary).build(); + } + + private void createSummaryForDeletedDirectories( + Map dirSummary) { + // Fetch the necessary metrics for deleted directories. 
+ Long deletedDirCount = getValueFromId(globalStatsDao.findById( + OmTableInsightTask.getTableCountKeyFromTable(DELETED_DIR_TABLE))); + // Calculate the total number of deleted directories + dirSummary.put("totalDeletedDirectories", deletedDirCount); + } + private void updateReplicatedAndUnReplicatedTotal( KeyInsightInfoResponse deletedKeyAndDirInsightInfo, RepeatedOmKeyInfo repeatedOmKeyInfo) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java new file mode 100644 index 000000000000..5a6d7a256e49 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/DeletedKeysInsightHandler.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the Deleted Table, updating counts and sizes of + * pending Key Deletions in the backend. + */ +public class DeletedKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(DeletedKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been backlogged in the backend for deletion. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + result.getLeft()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + result.getRight()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + + } + + /** + * Invoked by the process method to remove information on those keys that have + * been successfully deleted from the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = + (RepeatedOmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> + count > 0 ? count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > result.getRight() ? size - result.getRight() : + 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update the statistics on the keys + * pending to be deleted. 
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + // The size of deleted keys cannot change hence no-op. + return; + } + + /** + * Invoked by the reprocess method to calculate the records count of the + * deleted table and the sizes of replicated and unreplicated keys that are + * pending deletion in Ozone. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv + .getValue(); + Pair result = repeatedOmKeyInfo.getTotalSize(); + unReplicatedSize += result.getRight(); + replicatedSize += result.getLeft(); + count += repeatedOmKeyInfo.getOmKeyInfoList().size(); + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java new file mode 100644 index 000000000000..5ae23b68a703 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableHandler.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Interface for handling PUT, DELETE and UPDATE events for size-related + * tables for OM Insights. + */ +public interface OmTableHandler { + + /** + * Handles a PUT event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The PUT event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles a DELETE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The DELETE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Handles an UPDATE event for size-related tables by updating both the data + * sizes and their corresponding record counts in the tables. + * + * @param event The UPDATE event to be processed. + * @param tableName Table name associated with the event. + * @param objectCountMap A map storing object counts. + * @param unReplicatedSizeMap A map storing unReplicated size counts. + * @param replicatedSizeMap A map storing replicated size counts. + */ + void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap); + + + /** + * Returns a triple with the total count of records (left), total unreplicated + * size (middle), and total replicated size (right) in the given iterator. + * Increments count for each record and adds the dataSize if a record's value + * is an instance of OmKeyInfo,RepeatedOmKeyInfo. + * If the iterator is null, returns (0, 0, 0). + * + * @param iterator The iterator over the table to be iterated. + * @return A Triple with three Long values representing the count, + * unReplicated size and replicated size. + * @throws IOException If an I/O error occurs during the iterator traversal. + */ + Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException; + + + /** + * Returns the count key for the given table. + * + * @param tableName The name of the table. + * @return The count key for the table. 
+ */ + default String getTableCountKeyFromTable(String tableName) { + return tableName + "Count"; + } + + /** + * Returns the replicated size key for the given table. + * + * @param tableName The name of the table. + * @return The replicated size key for the table. + */ + default String getReplicatedSizeKeyFromTable(String tableName) { + return tableName + "ReplicatedDataSize"; + } + + /** + * Returns the unreplicated size key for the given table. + * + * @param tableName The name of the table. + * @return The unreplicated size key for the table. + */ + default String getUnReplicatedSizeKeyFromTable(String tableName) { + return tableName + "UnReplicatedDataSize"; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index c814d9d9e33f..3e84f311c942 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -26,8 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats; @@ -37,22 +35,20 @@ import java.io.IOException; import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.List; import java.util.Map; - - +import java.util.Collection; import java.util.Map.Entry; +import java.util.ArrayList; +import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.jooq.impl.DSL.currentTimestamp; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.jooq.impl.DSL.select; import static org.jooq.impl.DSL.using; +import static org.jooq.impl.DSL.currentTimestamp; /** * Class to iterate over the OM DB and store the total counts of volumes, @@ -65,14 +61,21 @@ public class OmTableInsightTask implements ReconOmTask { private GlobalStatsDao globalStatsDao; private Configuration sqlConfiguration; private ReconOMMetadataManager reconOMMetadataManager; + private Map tableHandlers; @Inject public OmTableInsightTask(GlobalStatsDao globalStatsDao, - Configuration sqlConfiguration, - ReconOMMetadataManager reconOMMetadataManager) { + Configuration sqlConfiguration, + ReconOMMetadataManager reconOMMetadataManager) { this.globalStatsDao = globalStatsDao; this.sqlConfiguration = sqlConfiguration; this.reconOMMetadataManager = reconOMMetadataManager; + + // Initialize table handlers + tableHandlers = new HashMap<>(); + tableHandlers.put(OPEN_KEY_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(OPEN_FILE_TABLE, new OpenKeysInsightHandler()); + tableHandlers.put(DELETED_TABLE, new DeletedKeysInsightHandler()); } /** @@ -90,8 +93,8 @@ public OmTableInsightTask(GlobalStatsDao globalStatsDao, @Override public Pair reprocess(OMMetadataManager 
omMetadataManager) { HashMap objectCountMap = initializeCountMap(); - HashMap unReplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); for (String tableName : getTaskTables()) { Table table = omMetadataManager.getTable(tableName); @@ -100,16 +103,16 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { return new ImmutablePair<>(getTaskName(), false); } - try ( - TableIterator> iterator - = table.iterator()) { - if (getTablesToCalculateSize().contains(tableName)) { - Triple details = getTableSizeAndCount(iterator); + try (TableIterator> iterator + = table.iterator()) { + if (tableHandlers.containsKey(tableName)) { + Triple details = + tableHandlers.get(tableName).getTableSizeAndCount(iterator); objectCountMap.put(getTableCountKeyFromTable(tableName), details.getLeft()); - unReplicatedSizeCountMap.put( + unReplicatedSizeMap.put( getUnReplicatedSizeKeyFromTable(tableName), details.getMiddle()); - replicatedSizeCountMap.put(getReplicatedSizeKeyFromTable(tableName), + replicatedSizeMap.put(getReplicatedSizeKeyFromTable(tableName), details.getRight()); } else { long count = Iterators.size(iterator); @@ -124,72 +127,17 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unReplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unReplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'reprocess' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); } - /** - * Returns a triple with the total count of records (left), total unreplicated - * size (middle), and total replicated size (right) in the given iterator. - * Increments count for each record and adds the dataSize if a record's value - * is an instance of OmKeyInfo. If the iterator is null, returns (0, 0, 0). - * - * @param iterator The iterator over the table to be iterated. - * @return A Triple with three Long values representing the count, - * unreplicated size and replicated size. - * @throws IOException If an I/O error occurs during the iterator traversal. - */ - private Triple getTableSizeAndCount( - TableIterator> iterator) - throws IOException { - long count = 0; - long unReplicatedSize = 0; - long replicatedSize = 0; - - if (iterator != null) { - while (iterator.hasNext()) { - Table.KeyValue kv = iterator.next(); - if (kv != null && kv.getValue() != null) { - if (kv.getValue() instanceof OmKeyInfo) { - OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); - unReplicatedSize += omKeyInfo.getDataSize(); - replicatedSize += omKeyInfo.getReplicatedSize(); - count++; - } - if (kv.getValue() instanceof RepeatedOmKeyInfo) { - RepeatedOmKeyInfo repeatedOmKeyInfo = (RepeatedOmKeyInfo) kv - .getValue(); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unReplicatedSize += result.getRight(); - replicatedSize += result.getLeft(); - // Since we can have multiple deleted keys of same name - count += repeatedOmKeyInfo.getOmKeyInfoList().size(); - } - } - } - } - - return Triple.of(count, unReplicatedSize, replicatedSize); - } - - /** - * Returns a collection of table names that require data size calculation. 
- */ - public Collection getTablesToCalculateSize() { - List taskTables = new ArrayList<>(); - taskTables.add(OPEN_KEY_TABLE); - taskTables.add(OPEN_FILE_TABLE); - taskTables.add(DELETED_TABLE); - return taskTables; - } - @Override public String getTaskName() { return "OmTableInsightTask"; @@ -211,10 +159,9 @@ public Pair process(OMUpdateEventBatch events) { Iterator eventIterator = events.getIterator(); // Initialize maps to store count and size information HashMap objectCountMap = initializeCountMap(); - HashMap unreplicatedSizeCountMap = initializeSizeMap(false); - HashMap replicatedSizeCountMap = initializeSizeMap(true); + HashMap unReplicatedSizeMap = initializeSizeMap(false); + HashMap replicatedSizeMap = initializeSizeMap(true); final Collection taskTables = getTaskTables(); - final Collection sizeRelatedTables = getTablesToCalculateSize(); // Process each update event while (eventIterator.hasNext()) { @@ -223,22 +170,21 @@ public Pair process(OMUpdateEventBatch events) { if (!taskTables.contains(tableName)) { continue; } - try { switch (omdbUpdateEvent.getAction()) { case PUT: - handlePutEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handlePutEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case DELETE: - handleDeleteEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleDeleteEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; case UPDATE: - handleUpdateEvent(omdbUpdateEvent, tableName, sizeRelatedTables, - objectCountMap, unreplicatedSizeCountMap, replicatedSizeCountMap); + handleUpdateEvent(omdbUpdateEvent, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); break; default: @@ -256,11 +202,11 @@ public Pair process(OMUpdateEventBatch events) { if (!objectCountMap.isEmpty()) { writeDataToDB(objectCountMap); } - if (!unreplicatedSizeCountMap.isEmpty()) { - writeDataToDB(unreplicatedSizeCountMap); + if (!unReplicatedSizeMap.isEmpty()) { + writeDataToDB(unReplicatedSizeMap); } - if (!replicatedSizeCountMap.isEmpty()) { - writeDataToDB(replicatedSizeCountMap); + if (!replicatedSizeMap.isEmpty()) { + writeDataToDB(replicatedSizeMap); } LOG.info("Completed a 'process' run of OmTableInsightTask."); return new ImmutablePair<>(getTaskName(), true); @@ -268,65 +214,34 @@ public Pair process(OMUpdateEventBatch events) { private void handlePutEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTablePutEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); - } else { - String countKey = getTableCountKeyFromTable(tableName); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - } - } - - private void handleSizeRelatedTablePutEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle PUT for OpenKeyTable & OpenFileTable - OmKeyInfo 
omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + omKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + omKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle PUT for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count + repeatedOmKeyInfo.getOmKeyInfoList().size()); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size + result.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size + result.getRight()); + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); + if (event.getValue() != null) { + if (tableHandler != null) { + tableHandler.handlePutEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); + } else { + String countKey = getTableCountKeyFromTable(tableName); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + } } } private void handleDeleteEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) + throws IOException { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { - handleSizeRelatedTableDeleteEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + if (tableHandler != null) { + tableHandler.handleDeleteEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } else { String countKey = getTableCountKeyFromTable(tableName); objectCountMap.computeIfPresent(countKey, @@ -335,109 +250,28 @@ private void handleDeleteEvent(OMDBUpdateEvent event, } } - private void handleSizeRelatedTableDeleteEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - if (event.getValue() instanceof OmKeyInfo) { - // Handle DELETE for OpenKeyTable & OpenFileTable - OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? count - 1L : 0L); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > omKeyInfo.getDataSize() ? - size - omKeyInfo.getDataSize() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > omKeyInfo.getReplicatedSize() ? - size - omKeyInfo.getReplicatedSize() : 0L); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle DELETE for DeletedTable - RepeatedOmKeyInfo repeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, (k, count) -> count > 0 ? 
- count - repeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair result = repeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size > result.getLeft() ? size - result.getLeft() : 0L); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size > result.getRight() ? size - result.getRight() : - 0L); - } - } private void handleUpdateEvent(OMDBUpdateEvent event, String tableName, - Collection sizeRelatedTables, HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + OmTableHandler tableHandler = tableHandlers.get(tableName); if (event.getValue() != null) { - if (sizeRelatedTables.contains(tableName)) { + if (tableHandler != null) { // Handle update for only size related tables - handleSizeRelatedTableUpdateEvent(event, tableName, objectCountMap, - unreplicatedSizeCountMap, replicatedSizeCountMap); + tableHandler.handleUpdateEvent(event, tableName, objectCountMap, + unReplicatedSizeMap, replicatedSizeMap); } } } - - private void handleSizeRelatedTableUpdateEvent( - OMDBUpdateEvent event, - String tableName, - HashMap objectCountMap, - HashMap unreplicatedSizeCountMap, - HashMap replicatedSizeCountMap) { - - if (event.getOldValue() == null) { - LOG.warn("Update event does not have the old Key Info for {}.", - event.getKey()); - return; - } - String countKey = getTableCountKeyFromTable(tableName); - String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); - String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); - - // In Update event the count for the open table will not change. So we don't - // need to update the count. Except for RepeatedOmKeyInfo, for which the - // size of omKeyInfoList can change - if (event.getValue() instanceof OmKeyInfo) { - // Handle UPDATE for OpenKeyTable & OpenFileTable - OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); - OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldKeyInfo.getDataSize() + - newKeyInfo.getDataSize()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldKeyInfo.getReplicatedSize() + - newKeyInfo.getReplicatedSize()); - } else if (event.getValue() instanceof RepeatedOmKeyInfo) { - // Handle UPDATE for DeletedTable - RepeatedOmKeyInfo oldRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getOldValue(); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = - (RepeatedOmKeyInfo) event.getValue(); - objectCountMap.computeIfPresent(countKey, - (k, count) -> count > 0 ? - count - oldRepeatedOmKeyInfo.getOmKeyInfoList().size() + - newRepeatedOmKeyInfo.getOmKeyInfoList().size() : 0L); - Pair oldSize = oldRepeatedOmKeyInfo.getTotalSize(); - Pair newSize = newRepeatedOmKeyInfo.getTotalSize(); - unreplicatedSizeCountMap.computeIfPresent(unReplicatedSizeKey, - (k, size) -> size - oldSize.getLeft() + newSize.getLeft()); - replicatedSizeCountMap.computeIfPresent(replicatedSizeKey, - (k, size) -> size - oldSize.getRight() + newSize.getRight()); - } - } - - + /** + * Write the updated count and size information to the database. + * + * @param dataMap Map containing the updated count and size information. 
+ */ private void writeDataToDB(Map dataMap) { List insertGlobalStats = new ArrayList<>(); List updateGlobalStats = new ArrayList<>(); @@ -461,6 +295,11 @@ private void writeDataToDB(Map dataMap) { globalStatsDao.update(updateGlobalStats); } + /** + * Initializes and returns a count map with the counts for the tables. + * + * @return The count map containing the counts for each table. + */ private HashMap initializeCountMap() { Collection tables = getTaskTables(); HashMap objectCountMap = new HashMap<>(tables.size()); @@ -478,11 +317,13 @@ private HashMap initializeCountMap() { * @return The size map containing the size counts for each table. */ private HashMap initializeSizeMap(boolean replicated) { - Collection tables = getTablesToCalculateSize(); - HashMap sizeCountMap = new HashMap<>(tables.size()); - for (String tableName : tables) { - String key = replicated ? getReplicatedSizeKeyFromTable(tableName) : - getUnReplicatedSizeKeyFromTable(tableName); + HashMap sizeCountMap = new HashMap<>(); + for (Map.Entry entry : tableHandlers.entrySet()) { + String tableName = entry.getKey(); + OmTableHandler tableHandler = entry.getValue(); + String key = + replicated ? tableHandler.getReplicatedSizeKeyFromTable(tableName) : + tableHandler.getUnReplicatedSizeKeyFromTable(tableName); sizeCountMap.put(key, getValueForKey(key)); } return sizeCountMap; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java new file mode 100644 index 000000000000..7a27d29d8f28 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OpenKeysInsightHandler.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; + +/** + * Manages records in the OpenKey Table, updating counts and sizes of + * open keys in the backend. + */ +public class OpenKeysInsightHandler implements OmTableHandler { + + private static final Logger LOG = + LoggerFactory.getLogger(OpenKeysInsightHandler.class); + + /** + * Invoked by the process method to add information on those keys that have + * been open in the backend. + */ + @Override + public void handlePutEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, (k, count) -> count + 1L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size + omKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size + omKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Put event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to delete information on those keys that are + * no longer closed in the backend. + */ + @Override + public void handleDeleteEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + String countKey = getTableCountKeyFromTable(tableName); + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + if (event.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) event.getValue(); + objectCountMap.computeIfPresent(countKey, + (k, count) -> count > 0 ? count - 1L : 0L); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size > omKeyInfo.getDataSize() ? + size - omKeyInfo.getDataSize() : 0L); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size > omKeyInfo.getReplicatedSize() ? + size - omKeyInfo.getReplicatedSize() : 0L); + } else { + LOG.warn("Delete event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * Invoked by the process method to update information on those open keys that + * have been updated in the backend. 
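To make the open-key bookkeeping concrete, here is a small self-contained simulation (illustrative only, not part of the patch). The map keys and the computeIfPresent pattern mirror OpenKeysInsightHandler; the sizes are made up, with 3x replication assumed. Two 1000-byte open keys are PUT, then one is DELETEd, leaving a count of 1 with 1000 unreplicated / 3000 replicated bytes; deletes floor at zero so the counters never go negative.

// Illustrative simulation of the handler's arithmetic (not part of the patch).
import java.util.HashMap;

public final class OpenKeySizeBookkeepingExample {
  public static void main(String[] args) {
    HashMap<String, Long> count = new HashMap<>();
    HashMap<String, Long> unReplicated = new HashMap<>();
    HashMap<String, Long> replicated = new HashMap<>();
    // Keys are pre-initialized by the task, which is why the handler can use computeIfPresent.
    count.put("openKeyTableCount", 0L);
    unReplicated.put("openKeyTableUnReplicatedDataSize", 0L);
    replicated.put("openKeyTableReplicatedDataSize", 0L);

    long dataSize = 1000L;       // stands in for OmKeyInfo#getDataSize()
    long replicatedSize = 3000L; // stands in for OmKeyInfo#getReplicatedSize(), 3x replication assumed

    // Two PUT events: add one to the count and add both sizes each time.
    for (int i = 0; i < 2; i++) {
      count.computeIfPresent("openKeyTableCount", (k, c) -> c + 1L);
      unReplicated.computeIfPresent("openKeyTableUnReplicatedDataSize", (k, s) -> s + dataSize);
      replicated.computeIfPresent("openKeyTableReplicatedDataSize", (k, s) -> s + replicatedSize);
    }

    // One DELETE event: subtract, flooring at zero as the handler does.
    count.computeIfPresent("openKeyTableCount", (k, c) -> c > 0 ? c - 1L : 0L);
    unReplicated.computeIfPresent("openKeyTableUnReplicatedDataSize",
        (k, s) -> s > dataSize ? s - dataSize : 0L);
    replicated.computeIfPresent("openKeyTableReplicatedDataSize",
        (k, s) -> s > replicatedSize ? s - replicatedSize : 0L);

    System.out.println(count);        // {openKeyTableCount=1}
    System.out.println(unReplicated); // {openKeyTableUnReplicatedDataSize=1000}
    System.out.println(replicated);   // {openKeyTableReplicatedDataSize=3000}
  }
}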
+ */ + @Override + public void handleUpdateEvent(OMDBUpdateEvent event, + String tableName, + HashMap objectCountMap, + HashMap unReplicatedSizeMap, + HashMap replicatedSizeMap) { + + if (event.getValue() != null) { + if (event.getOldValue() == null) { + LOG.warn("Update event does not have the old Key Info for {}.", + event.getKey()); + return; + } + String unReplicatedSizeKey = getUnReplicatedSizeKeyFromTable(tableName); + String replicatedSizeKey = getReplicatedSizeKeyFromTable(tableName); + + // In Update event the count for the open table will not change. So we + // don't need to update the count. + OmKeyInfo oldKeyInfo = (OmKeyInfo) event.getOldValue(); + OmKeyInfo newKeyInfo = (OmKeyInfo) event.getValue(); + unReplicatedSizeMap.computeIfPresent(unReplicatedSizeKey, + (k, size) -> size - oldKeyInfo.getDataSize() + + newKeyInfo.getDataSize()); + replicatedSizeMap.computeIfPresent(replicatedSizeKey, + (k, size) -> size - oldKeyInfo.getReplicatedSize() + + newKeyInfo.getReplicatedSize()); + } else { + LOG.warn("Update event does not have the Key Info for {}.", + event.getKey()); + } + } + + /** + * This method is called by the reprocess method. It calculates the record + * counts for both the open key table and the open file table. Additionally, + * it computes the sizes of both replicated and unreplicated keys + * that are currently open in the backend. + */ + @Override + public Triple getTableSizeAndCount( + TableIterator> iterator) + throws IOException { + long count = 0; + long unReplicatedSize = 0; + long replicatedSize = 0; + + if (iterator != null) { + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + if (kv != null && kv.getValue() != null) { + OmKeyInfo omKeyInfo = (OmKeyInfo) kv.getValue(); + unReplicatedSize += omKeyInfo.getDataSize(); + replicatedSize += omKeyInfo.getReplicatedSize(); + count++; + } + } + } + return Triple.of(count, unReplicatedSize, replicatedSize); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java index 42d69e030f31..b1aecc9a4f4e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/OMMetadataManagerTestUtils.java @@ -397,23 +397,31 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager, .build()); } + @SuppressWarnings("parameternumber") public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager, String bucketName, String volumeName, String dirName, long parentObjectId, long bucketObjectId, - long volumeObjectId) + long volumeObjectId, + long objectId) throws IOException { - // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName" - String omKey = omMetadataManager.getOzonePathKey(volumeObjectId, - bucketObjectId, parentObjectId, dirName); + // DB key in DeletedDirectoryTable => + // "volumeID/bucketID/parentId/dirName/dirObjectId" + + String ozoneDbKey = omMetadataManager.getOzonePathKey(volumeObjectId, + bucketObjectId, parentObjectId, dirName); + String ozoneDeleteKey = omMetadataManager.getOzoneDeletePathKey( + objectId, ozoneDbKey); + - omMetadataManager.getDeletedDirTable().put(omKey, + omMetadataManager.getDeletedDirTable().put(ozoneDeleteKey, new OmKeyInfo.Builder() .setBucketName(bucketName) .setVolumeName(volumeName) .setKeyName(dirName) + .setObjectID(objectId) 
.setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 05d9927d6c93..42aabef0cf15 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -288,8 +288,9 @@ private void initializeInjector() throws Exception { utilizationSchemaDefinition); fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition); - omTableInsightTask = new OmTableInsightTask( - globalStatsDao, sqlConfiguration, reconOMMetadataManager); + omTableInsightTask = + new OmTableInsightTask(globalStatsDao, sqlConfiguration, + reconOMMetadataManager); containerHealthSchemaManager = reconTestInjector.getInstance(ContainerHealthSchemaManager.class); clusterStateEndpoint = @@ -515,11 +516,11 @@ public void setUp() throws Exception { // Populate the deletedDirectories table in OM DB writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1", - 3L, 2L, 1L); + 3L, 2L, 1L, 23L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2", - 6L, 5L, 4L); + 6L, 5L, 4L, 22L); writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3", - 9L, 8L, 7L); + 9L, 8L, 7L, 21L); // Truncate global stats table before running each test dslContext.truncate(GLOBAL_STATS); @@ -594,7 +595,7 @@ public void testGetDatanodes() throws Exception { (DatanodesResponse) response1.getEntity(); DatanodeMetadata datanodeMetadata1 = datanodesResponse1.getDatanodes().stream().filter(datanodeMetadata -> - datanodeMetadata.getHostname().equals("host1.datanode")) + datanodeMetadata.getHostname().equals("host1.datanode")) .findFirst().orElse(null); return (datanodeMetadata1 != null && datanodeMetadata1.getContainers() == 1 && @@ -699,7 +700,7 @@ public void testGetMetricsResponse() throws Exception { byte[] fileBytes = FileUtils.readFileToByteArray( new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) .getFile()) - ); + ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index df014f4276fa..56d8fe213152 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -21,20 +21,28 @@ import org.apache.commons.lang3.tuple.ImmutablePair; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TypedTable; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import 
org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; - +import org.apache.hadoop.ozone.recon.spi.impl.ReconNamespaceSummaryManagerImpl; import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; import org.junit.jupiter.api.io.TempDir; import java.io.IOException; @@ -44,18 +52,20 @@ import java.util.Arrays; import java.util.List; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; -import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeOpenFileToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.DELETE; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT; import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE; @@ -66,29 +76,83 @@ import static org.mockito.Mockito.when; /** - * Unit test for Object Count Task. + * This test class is designed for the OM Table Insight Task. It conducts tests + * for tables that require both Size and Count, as well as for those that only + * require Count. 
*/ public class TestOmTableInsightTask extends AbstractReconSqlDBTest { @TempDir private Path temporaryFolder; - private GlobalStatsDao globalStatsDao; - private OmTableInsightTask omTableInsightTask; - private DSLContext dslContext; + private static GlobalStatsDao globalStatsDao; + private static OmTableInsightTask omTableInsightTask; + private static DSLContext dslContext; private boolean isSetupDone = false; - private ReconOMMetadataManager reconOMMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithFSO nSSummaryTaskWithFso; + private static OzoneConfiguration ozoneConfiguration; + private static ReconNamespaceSummaryManagerImpl reconNamespaceSummaryManager; + + // Object names in FSO-enabled format + private static final String VOL = "volume1"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "file2"; + private static final String KEY_THREE = "dir1/dir2/file3"; + private static final String FILE_ONE = "file1"; + private static final String FILE_TWO = "file2"; + private static final String FILE_THREE = "file3"; + private static final String DIR_ONE = "dir1"; + private static final String DIR_TWO = "dir2"; + private static final String DIR_THREE = "dir3"; + + + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long DIR_ONE_OBJECT_ID = 14L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long DIR_TWO_OBJECT_ID = 17L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long DIR_THREE_OBJECT_ID = 10L; + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_SIZE = 1025L; + private static final long KEY_THREE_SIZE = 2000L; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + + @Mock + private Table nsSummaryTable; public TestOmTableInsightTask() { super(); } private void initializeInjector() throws IOException { + ozoneConfiguration = new OzoneConfiguration(); reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(Files.createDirectory( temporaryFolder.resolve("JunitOmDBDir")).toFile()), Files.createDirectory(temporaryFolder.resolve("NewDir")).toFile()); globalStatsDao = getDao(GlobalStatsDao.class); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withContainerDB() + .build(); + reconNamespaceSummaryManager = reconTestInjector.getInstance( + ReconNamespaceSummaryManagerImpl.class); + omTableInsightTask = new OmTableInsightTask( globalStatsDao, getConfiguration(), reconOMMetadataManager); + nSSummaryTaskWithFso = new NSSummaryTaskWithFSO( + reconNamespaceSummaryManager, reconOMMetadataManager, + ozoneConfiguration); dslContext = getDslContext(); } @@ -99,10 +163,182 @@ public void setUp() throws IOException { initializeInjector(); isSetupDone = true; } + MockitoAnnotations.openMocks(this); // Truncate table before running each test dslContext.truncate(GLOBAL_STATS); } + /** + * Populate OM-DB with the following structure. 
+ * volume1 + * | \ + * bucket1 bucket2 + * / \ \ + * dir1 dir2 dir3 + * / \ \ + * file1 file2 file3 + * + * @throws IOException + */ + private void populateOMDB() throws IOException { + + // Create 2 Buckets bucket1 and bucket2 + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .build(); + String bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .build(); + bucketKey = reconOMMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo2); + + // Create a single volume named volume1 + String volumeKey = reconOMMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + reconOMMetadataManager.getVolumeTable().put(volumeKey, args); + + // Generate keys for the File Table + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + FILE_ONE, + KEY_ONE_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + FILE_TWO, + KEY_TWO_OBJECT_ID, + DIR_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + FILE_THREE, + KEY_THREE_OBJECT_ID, + DIR_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + + // Generate Deleted Directories in OM + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_ONE, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_ONE_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_ONE, + VOL, + DIR_TWO, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + DIR_TWO_OBJECT_ID); + writeDeletedDirToOm(reconOMMetadataManager, + BUCKET_TWO, + VOL, + DIR_THREE, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + DIR_THREE_OBJECT_ID); + } + + @Test + public void testReprocessForDeletedDirectory() throws Exception { + // Create keys and deleted directories + populateOMDB(); + + // Generate NamespaceSummary for the OM DB + nSSummaryTaskWithFso.reprocessWithFSO(reconOMMetadataManager); + + Pair result = + omTableInsightTask.reprocess(reconOMMetadataManager); + assertTrue(result.getRight()); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + + @Test + public void testProcessForDeletedDirectoryTable() throws IOException { + // Prepare mock data size + Long expectedSize1 = 1000L; + Long expectedSize2 = 2000L; + NSSummary nsSummary1 = new NSSummary(); + NSSummary nsSummary2 = new NSSummary(); + nsSummary1.setSizeOfFiles(expectedSize1); + nsSummary2.setSizeOfFiles(expectedSize2); + when(nsSummaryTable.get(1L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(2L)).thenReturn(nsSummary1); + when(nsSummaryTable.get(3L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(4L)).thenReturn(nsSummary2); + when(nsSummaryTable.get(5L)).thenReturn(nsSummary2); + + /* DB key in 
DeletedDirectoryTable => + "/volumeId/bucketId/parentId/dirName/dirObjectId" */ + List paths = Arrays.asList( + "/18/28/22/dir1/1", + "/18/26/23/dir1/2", + "/18/20/24/dir1/3", + "/18/21/25/dir1/4", + "/18/27/26/dir1/5" + ); + + // Testing PUT events + // Create 5 OMDBUpdateEvent instances for 5 different deletedDirectory paths + ArrayList putEvents = new ArrayList<>(); + for (long i = 0L; i < 5L; i++) { + putEvents.add(getOMUpdateEvent(paths.get((int) i), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false), + DELETED_DIR_TABLE, PUT, null)); + } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + omTableInsightTask.process(putEventBatch); + assertEquals(5, getCountForTable(DELETED_DIR_TABLE)); + + + // Testing DELETE events + // Create 2 OMDBUpdateEvent instances for 2 different deletedDirectory paths + ArrayList deleteEvents = new ArrayList<>(); + deleteEvents.add(getOMUpdateEvent(paths.get(0), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 1L, false), DELETED_DIR_TABLE, + DELETE, null)); + deleteEvents.add(getOMUpdateEvent(paths.get(2), + getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE, + DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + omTableInsightTask.process(deleteEventBatch); + assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); + } + @Test public void testReprocessForCount() throws Exception { OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class); @@ -110,27 +346,32 @@ public void testReprocessForCount() throws Exception { // Mock 5 rows in each table and test the count for (String tableName : omTableInsightTask.getTaskTables()) { TypedTable table = mock(TypedTable.class); - TypedTable.TypedTableIterator mockIter = mock(TypedTable - .TypedTableIterator.class); + TypedTable.TypedTableIterator mockIter = + mock(TypedTable.TypedTableIterator.class); when(table.iterator()).thenReturn(mockIter); when(omMetadataManager.getTable(tableName)).thenReturn(table); - when(mockIter.hasNext()) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(true) - .thenReturn(false); + when(mockIter.hasNext()).thenReturn(true, true, true, true, true, false); + TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class); - when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + + if (tableName.equals(DELETED_TABLE)) { + RepeatedOmKeyInfo keyInfo = mock(RepeatedOmKeyInfo.class); + when(keyInfo.getTotalSize()).thenReturn(ImmutablePair.of(100L, 100L)); + when(keyInfo.getOmKeyInfoList()).thenReturn( + Arrays.asList(mock(OmKeyInfo.class))); + when(mockKeyValue.getValue()).thenReturn(keyInfo); + } else { + when(mockKeyValue.getValue()).thenReturn(mock(OmKeyInfo.class)); + } + when(mockIter.next()).thenReturn(mockKeyValue); } Pair result = omTableInsightTask.reprocess(omMetadataManager); - assertTrue(result.getRight()); + assertTrue(result.getRight()); assertEquals(5L, getCountForTable(KEY_TABLE)); assertEquals(5L, getCountForTable(VOLUME_TABLE)); assertEquals(5L, getCountForTable(BUCKET_TABLE)); @@ -138,7 +379,6 @@ public void testReprocessForCount() throws Exception { assertEquals(5L, getCountForTable(DELETED_TABLE)); } - @Test public void testReprocessForOpenKeyTable() throws Exception { // Populate the OpenKeys table in OM DB @@ -203,44 +443,73 @@ public void testReprocessForDeletedTable() throws Exception { @Test public void testProcessForCount() { - ArrayList events = new ArrayList<>(); - // Create 5 put, 1 delete and 1 update 
event for each table + List initialEvents = new ArrayList<>(); + + // Creating events for each table except the deleted table for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; // Skipping deleted table as it has a separate test + } + + // Adding 5 PUT events per table for (int i = 0; i < 5; i++) { - events.add(getOMUpdateEvent("item" + i, null, tableName, PUT, null)); + initialEvents.add( + getOMUpdateEvent("item" + i, mock(OmKeyInfo.class), tableName, PUT, + null)); } - // for delete event, if value is set to null, the counter will not be - // decremented. This is because the value will be null if item does not - // exist in the database and there is no need to delete. - events.add(getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, - DELETE, null)); - events.add(getOMUpdateEvent("item1", null, tableName, UPDATE, null)); + + // Adding 1 DELETE event where value is null, indicating non-existence + // in the database. + initialEvents.add( + getOMUpdateEvent("item0", mock(OmKeyInfo.class), tableName, DELETE, + null)); + // Adding 1 UPDATE event. This should not affect the count. + initialEvents.add( + getOMUpdateEvent("item1", mock(OmKeyInfo.class), tableName, UPDATE, + mock(OmKeyInfo.class))); } - OMUpdateEventBatch omUpdateEventBatch = new OMUpdateEventBatch(events); - omTableInsightTask.process(omUpdateEventBatch); - // Verify 4 items in each table. (5 puts - 1 delete + 0 update) - assertEquals(4L, getCountForTable(KEY_TABLE)); - assertEquals(4L, getCountForTable(VOLUME_TABLE)); - assertEquals(4L, getCountForTable(BUCKET_TABLE)); - assertEquals(4L, getCountForTable(FILE_TABLE)); + // Processing the initial batch of events + OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents); + omTableInsightTask.process(initialBatch); - // add a new key and simulate delete on non-existing item (value: null) - ArrayList newEvents = new ArrayList<>(); + // Verifying the count in each table for (String tableName : omTableInsightTask.getTaskTables()) { - newEvents.add(getOMUpdateEvent("item5", null, tableName, PUT, null)); - // This delete event should be a noop since value is null - newEvents.add(getOMUpdateEvent("item0", null, tableName, DELETE, null)); + if (tableName.equals(DELETED_TABLE)) { + continue; + } + assertEquals(4L, getCountForTable( + tableName)); // 4 items expected after processing (5 puts - 1 delete) } - omUpdateEventBatch = new OMUpdateEventBatch(newEvents); - omTableInsightTask.process(omUpdateEventBatch); + List additionalEvents = new ArrayList<>(); + // Simulating new PUT and DELETE events + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // Adding 1 new PUT event + additionalEvents.add( + getOMUpdateEvent("item6", mock(OmKeyInfo.class), tableName, PUT, + null)); + // Attempting to delete a non-existing item (value: null) + additionalEvents.add( + getOMUpdateEvent("item0", null, tableName, DELETE, null)); + } - // Verify 5 items in each table. 
(1 new put + 0 delete) - assertEquals(5L, getCountForTable(KEY_TABLE)); - assertEquals(5L, getCountForTable(VOLUME_TABLE)); - assertEquals(5L, getCountForTable(BUCKET_TABLE)); - assertEquals(5L, getCountForTable(FILE_TABLE)); + // Processing the additional events + OMUpdateEventBatch additionalBatch = + new OMUpdateEventBatch(additionalEvents); + omTableInsightTask.process(additionalBatch); + // Verifying the final count in each table + for (String tableName : omTableInsightTask.getTaskTables()) { + if (tableName.equals(DELETED_TABLE)) { + continue; + } + // 5 items expected after processing the additional events. + assertEquals(5L, getCountForTable( + tableName)); + } } @Test @@ -251,35 +520,38 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { when(omKeyInfo.getDataSize()).thenReturn(sizeToBeReturned); when(omKeyInfo.getReplicatedSize()).thenReturn(sizeToBeReturned * 3); - // Test PUT events + // Test PUT events. + // Add 5 PUT events for OpenKeyTable and OpenFileTable. ArrayList putEvents = new ArrayList<>(); - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { - for (int i = 0; i < 5; i++) { - putEvents.add( - getOMUpdateEvent("item" + i, omKeyInfo, tableName, PUT, null)); - } + for (int i = 0; i < 10; i++) { + String table = (i < 5) ? OPEN_KEY_TABLE : OPEN_FILE_TABLE; + putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null)); } + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); omTableInsightTask.process(putEventBatch); - // After 5 PUTs, size should be 5 * 1000 = 5000 for each size-related table - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + // After 5 PUTs, size should be 5 * 1000 = 5000 + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(5000L, getUnReplicatedSizeForTable(tableName)); assertEquals(15000L, getReplicatedSizeForTable(tableName)); } // Test DELETE events ArrayList deleteEvents = new ArrayList<>(); - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { - // Delete "item0" - deleteEvents.add( - getOMUpdateEvent("item0", omKeyInfo, tableName, DELETE, null)); - } + // Delete "item0" for OpenKeyTable and OpenFileTable. 
+ deleteEvents.add( + getOMUpdateEvent("item0", omKeyInfo, OPEN_KEY_TABLE, DELETE, null)); + deleteEvents.add( + getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null)); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); omTableInsightTask.process(deleteEventBatch); // After deleting "item0", size should be 4 * 1000 = 4000 - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(4000L, getUnReplicatedSizeForTable(tableName)); assertEquals(12000L, getReplicatedSizeForTable(tableName)); } @@ -287,7 +559,8 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { // Test UPDATE events ArrayList updateEvents = new ArrayList<>(); Long newSizeToBeReturned = 2000L; - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { // Update "item1" with a new size OmKeyInfo newKeyInfo = mock(OmKeyInfo.class); when(newKeyInfo.getDataSize()).thenReturn(newSizeToBeReturned); @@ -295,12 +568,14 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { updateEvents.add( getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo)); } + OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); omTableInsightTask.process(updateEventBatch); // After updating "item1", size should be 4000 - 1000 + 2000 = 5000 // presentValue - oldValue + newValue = updatedValue - for (String tableName : omTableInsightTask.getTablesToCalculateSize()) { + for (String tableName : new ArrayList<>( + Arrays.asList(OPEN_KEY_TABLE, OPEN_FILE_TABLE))) { assertEquals(5000L, getUnReplicatedSizeForTable(tableName)); assertEquals(15000L, getReplicatedSizeForTable(tableName)); } @@ -313,9 +588,10 @@ public void testProcessForDeletedTable() { new ImmutablePair<>(1000L, 3000L); ArrayList omKeyInfoList = new ArrayList<>(); // Add 5 OmKeyInfo objects to the list - for (int i = 0; i < 5; i++) { + for (long i = 0; i < 5; i++) { OmKeyInfo omKeyInfo = - getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", true); + getOmKeyInfo("sampleVol", "non_fso_Bucket", "non_fso_key1", i + 1, + true); // Set properties of OmKeyInfo object if needed omKeyInfoList.add(omKeyInfo); } @@ -353,38 +629,14 @@ public void testProcessForDeletedTable() { // After deleting "item0", size should be 4 * 1000 = 4000 assertEquals(4000L, getUnReplicatedSizeForTable(DELETED_TABLE)); assertEquals(12000L, getReplicatedSizeForTable(DELETED_TABLE)); - - - // Test UPDATE events - ArrayList updateEvents = new ArrayList<>(); - // Update "item1" with new sizes - ImmutablePair newSizesToBeReturned = - new ImmutablePair<>(500L, 1500L); - RepeatedOmKeyInfo newRepeatedOmKeyInfo = mock(RepeatedOmKeyInfo.class); - when(newRepeatedOmKeyInfo.getTotalSize()).thenReturn(newSizesToBeReturned); - when(newRepeatedOmKeyInfo.getOmKeyInfoList()).thenReturn( - omKeyInfoList.subList(1, 5)); - OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); - // For item1, newSize=500 and totalCount of deleted keys should be 4 - updateEvents.add( - getOMUpdateEvent("item1", newRepeatedOmKeyInfo, DELETED_TABLE, UPDATE, - repeatedOmKeyInfo)); - omTableInsightTask.process(updateEventBatch); - // Since one key has been deleted, total deleted keys should be 19 - assertEquals(19L, getCountForTable(DELETED_TABLE)); - // After updating "item1", size should be 4000 - 1000 + 500 = 3500 - // 
presentValue - oldValue + newValue = updatedValue - assertEquals(3500L, getUnReplicatedSizeForTable(DELETED_TABLE)); - assertEquals(10500L, getReplicatedSizeForTable(DELETED_TABLE)); } - private OMDBUpdateEvent getOMUpdateEvent( String name, Object value, String table, OMDBUpdateEvent.OMDBUpdateAction action, Object oldValue) { - return new OMUpdateEventBuilder() + return new OMDBUpdateEvent.OMUpdateEventBuilder() .setAction(action) .setKey(name) .setValue(value) @@ -409,7 +661,8 @@ private long getReplicatedSizeForTable(String tableName) { } private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, - String keyName, boolean isFile) { + String keyName, Long objectID, + boolean isFile) { return new OmKeyInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -418,6 +671,7 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .setReplicationConfig(StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(100L) + .setObjectID(objectID) .build(); } } From 15b62de75fcaafa6fe4e747a2c23794083957db3 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 9 Feb 2024 12:35:37 +0100 Subject: [PATCH 008/108] HDDS-10333. RocksDB logger not closed (#6200) --- .../hadoop/hdds/utils/db/DBStoreBuilder.java | 8 +-- .../utils/db/managed/ManagedDBOptions.java | 14 +++++ .../hdds/utils/db/managed/ManagedLogger.java | 52 +++++++++++++++++++ 3 files changed, 68 insertions(+), 6 deletions(-) create mode 100644 hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index 32fcbfec6e44..31089bc1c0b6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedLogger; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedStatistics; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; @@ -405,12 +406,7 @@ private ManagedDBOptions getDefaultDBOptions( // Apply logging settings. 
if (rocksDBConfiguration.isRocksdbLoggingEnabled()) { - org.rocksdb.Logger logger = new org.rocksdb.Logger(dbOptions) { - @Override - protected void log(InfoLogLevel infoLogLevel, String s) { - ROCKS_DB_LOGGER.info(s); - } - }; + ManagedLogger logger = new ManagedLogger(dbOptions, (infoLogLevel, s) -> ROCKS_DB_LOGGER.info(s)); InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration .getRocksdbLogLevel() + "_LEVEL"); logger.setInfoLogLevel(level); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java index 638739ff557e..4eb2a0d2bc36 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedDBOptions.java @@ -18,20 +18,34 @@ */ package org.apache.hadoop.hdds.utils.db.managed; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.ratis.util.UncheckedAutoCloseable; import org.rocksdb.DBOptions; +import org.rocksdb.Logger; +import java.util.concurrent.atomic.AtomicReference; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.LOG; import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; /** * Managed DBOptions. */ public class ManagedDBOptions extends DBOptions { + private final UncheckedAutoCloseable leakTracker = track(this); + private final AtomicReference loggerRef = new AtomicReference<>(); + + @Override + public DBOptions setLogger(Logger logger) { + IOUtils.close(LOG, loggerRef.getAndSet(logger)); + return super.setLogger(logger); + } @Override public void close() { try { + IOUtils.close(LOG, loggerRef.getAndSet(null)); super.close(); } finally { leakTracker.close(); diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java new file mode 100644 index 000000000000..d04f91cd4e29 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedLogger.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.ratis.util.UncheckedAutoCloseable; +import org.rocksdb.InfoLogLevel; +import org.rocksdb.Logger; + +import java.util.function.BiConsumer; + +import static org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils.track; + +/** Managed {@link Logger}. */ +public class ManagedLogger extends Logger { + + private final UncheckedAutoCloseable leakTracker = track(this); + private final BiConsumer delegate; + + public ManagedLogger(ManagedDBOptions dbOptions, BiConsumer delegate) { + super(dbOptions); + this.delegate = delegate; + } + + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + delegate.accept(infoLogLevel, logMsg); + } + + @Override + public void close() { + try { + super.close(); + } finally { + leakTracker.close(); + } + } +} From 75df6c1b752ccc5db642251673bdf0cc9029a2f9 Mon Sep 17 00:00:00 2001 From: Duong Nguyen Date: Fri, 9 Feb 2024 10:02:45 -0800 Subject: [PATCH 009/108] HDDS-9843. Ozone client high memory (heap) utilization (#6153) Co-authored-by: Tsz-Wo Nicholas Sze --- .../hadoop/hdds/scm/storage/BufferPool.java | 1 + .../hadoop/hdds/utils/db/CodecBuffer.java | 9 ++++++- .../hadoop/ozone/common/ChunkBuffer.java | 10 ++++++-- .../common/ChunkBufferImplWithByteBuffer.java | 14 +++++++++++ .../ozone/common/IncrementalChunkBuffer.java | 19 +++++++++++--- .../hadoop/ozone/common/TestChunkBuffer.java | 23 ++++++++++++++--- .../container/upgrade/TestUpgradeManager.java | 25 ++++++++++++++----- .../hdds/scm/storage/TestCommitWatcher.java | 4 +++ 8 files changed, 90 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java index 274b977ef623..b68b56f67c72 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java @@ -111,6 +111,7 @@ void releaseBuffer(ChunkBuffer chunkBuffer) { } public void clearBufferPool() { + bufferList.forEach(ChunkBuffer::close); bufferList.clear(); currentBufferIndex = -1; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java index 64e494a5af10..1ac293b301bb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/CodecBuffer.java @@ -28,6 +28,7 @@ import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled; import org.apache.ratis.util.MemoizedSupplier; import org.apache.ratis.util.Preconditions; +import org.apache.ratis.util.UncheckedAutoCloseable; import org.apache.ratis.util.function.CheckedFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +51,7 @@ * A buffer used by {@link Codec} * for supporting RocksDB direct {@link ByteBuffer} APIs. 
*/ -public class CodecBuffer implements AutoCloseable { +public class CodecBuffer implements UncheckedAutoCloseable { public static final Logger LOG = LoggerFactory.getLogger(CodecBuffer.class); /** To create {@link CodecBuffer} instances. */ @@ -340,6 +341,12 @@ public int readableBytes() { return buf.readableBytes(); } + /** @return a writable {@link ByteBuffer}. */ + public ByteBuffer asWritableByteBuffer() { + assertRefCnt(1); + return buf.nioBuffer(0, buf.maxCapacity()); + } + /** @return a readonly {@link ByteBuffer} view of this buffer. */ public ByteBuffer asReadOnlyByteBuffer() { assertRefCnt(1); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index 3948b5f04fc0..058934c2f27d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -27,10 +27,12 @@ import org.apache.hadoop.hdds.scm.ByteStringConversion; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.UncheckedAutoCloseable; /** Buffer for a block chunk. */ -public interface ChunkBuffer { +public interface ChunkBuffer extends UncheckedAutoCloseable { /** Similar to {@link ByteBuffer#allocate(int)}. */ static ChunkBuffer allocate(int capacity) { @@ -49,7 +51,8 @@ static ChunkBuffer allocate(int capacity, int increment) { if (increment > 0 && increment < capacity) { return new IncrementalChunkBuffer(capacity, increment, false); } - return new ChunkBufferImplWithByteBuffer(ByteBuffer.allocate(capacity)); + CodecBuffer codecBuffer = CodecBuffer.allocateDirect(capacity); + return new ChunkBufferImplWithByteBuffer(codecBuffer.asWritableByteBuffer(), codecBuffer); } /** Wrap the given {@link ByteBuffer} as a {@link ChunkBuffer}. */ @@ -86,6 +89,9 @@ default boolean hasRemaining() { /** Similar to {@link ByteBuffer#clear()}. */ ChunkBuffer clear(); + default void close() { + } + /** Similar to {@link ByteBuffer#put(ByteBuffer)}. */ ChunkBuffer put(ByteBuffer b); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java index 0cf49681cb16..fe2ee5fa8acb 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferImplWithByteBuffer.java @@ -28,13 +28,27 @@ import java.util.function.Function; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.UncheckedAutoCloseable; /** {@link ChunkBuffer} implementation using a single {@link ByteBuffer}. 
*/ final class ChunkBufferImplWithByteBuffer implements ChunkBuffer { private final ByteBuffer buffer; + private final UncheckedAutoCloseable underlying; ChunkBufferImplWithByteBuffer(ByteBuffer buffer) { + this(buffer, null); + } + + ChunkBufferImplWithByteBuffer(ByteBuffer buffer, UncheckedAutoCloseable underlying) { this.buffer = Objects.requireNonNull(buffer, "buffer == null"); + this.underlying = underlying; + } + + @Override + public void close() { + if (underlying != null) { + underlying.close(); + } } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index 5a63c09f1234..dda4fae0d2b5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.common; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import java.io.IOException; @@ -47,6 +48,8 @@ final class IncrementalChunkBuffer implements ChunkBuffer { private final int limitIndex; /** Buffer list to be allocated incrementally. */ private final List buffers; + /** The underlying buffers. */ + private final List underlying; /** Is this a duplicated buffer? (for debug only) */ private final boolean isDuplicated; /** The index of the first non-full buffer. */ @@ -58,11 +61,18 @@ final class IncrementalChunkBuffer implements ChunkBuffer { this.limit = limit; this.increment = increment; this.limitIndex = limit / increment; - this.buffers = new ArrayList<>( - limitIndex + (limit % increment == 0 ? 0 : 1)); + int size = limitIndex + (limit % increment == 0 ? 0 : 1); + this.buffers = new ArrayList<>(size); + this.underlying = isDuplicated ? Collections.emptyList() : new ArrayList<>(size); this.isDuplicated = isDuplicated; } + @Override + public void close() { + underlying.forEach(CodecBuffer::release); + underlying.clear(); + } + /** @return the capacity for the buffer at the given index. */ private int getBufferCapacityAtIndex(int i) { Preconditions.checkArgument(i >= 0); @@ -99,6 +109,7 @@ private ByteBuffer getAtIndex(int i) { /** @return the i-th buffer. It may allocate buffers. 
*/ private ByteBuffer getAndAllocateAtIndex(int index) { + Preconditions.checkState(!isDuplicated, "Duplicated buffer is readonly."); Preconditions.checkArgument(index >= 0); // never allocate over limit if (limit % increment == 0) { @@ -115,7 +126,9 @@ private ByteBuffer getAndAllocateAtIndex(int index) { // allocate upto the given index ByteBuffer b = null; for (; i <= index; i++) { - b = ByteBuffer.allocate(getBufferCapacityAtIndex(i)); + final CodecBuffer c = CodecBuffer.allocateDirect(getBufferCapacityAtIndex(i)); + underlying.add(c); + b = c.asWritableByteBuffer(); buffers.add(b); } return b; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java index 3d6d38f3d3bd..b5212825e58b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java @@ -29,7 +29,11 @@ import org.apache.hadoop.hdds.utils.MockGatheringChannel; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -46,6 +50,16 @@ private static int nextInt(int n) { return ThreadLocalRandom.current().nextInt(n); } + @BeforeAll + public static void beforeAll() { + CodecBuffer.enableLeakDetection(); + } + + @AfterEach + public void after() throws Exception { + CodecTestUtil.gc(); + } + @Test @Timeout(1) void testImplWithByteBuffer() throws IOException { @@ -59,7 +73,9 @@ void testImplWithByteBuffer() throws IOException { private static void runTestImplWithByteBuffer(int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); - runTestImpl(expected, 0, ChunkBuffer.allocate(n)); + try (ChunkBuffer c = ChunkBuffer.allocate(n)) { + runTestImpl(expected, 0, c); + } } @Test @@ -78,8 +94,9 @@ void testIncrementalChunkBuffer() throws IOException { private static void runTestIncrementalChunkBuffer(int increment, int n) throws IOException { final byte[] expected = new byte[n]; ThreadLocalRandom.current().nextBytes(expected); - runTestImpl(expected, increment, - new IncrementalChunkBuffer(n, increment, false)); + try (IncrementalChunkBuffer c = new IncrementalChunkBuffer(n, increment, false)) { + runTestImpl(expected, increment, c); + } } @Test diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index 3be931c13211..b3c15a46f76f 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -22,6 +22,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.db.CodecBuffer; +import org.apache.hadoop.hdds.utils.db.CodecTestUtil; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; @@ 
-44,6 +46,8 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.metadata.DatanodeSchemaThreeDBDefinition; import org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaThreeImpl; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -135,6 +139,16 @@ public void setup() throws Exception { chunkManager = new FilePerBlockStrategy(true, blockManager, null); } + @BeforeAll + public static void beforeClass() { + CodecBuffer.enableLeakDetection(); + } + + @AfterEach + public void after() throws Exception { + CodecTestUtil.gc(); + } + @Test public void testUpgrade() throws IOException { int num = 2; @@ -187,7 +201,7 @@ private Map putAnyBlockData(KeyValueContainerData data, private void putChunksInBlock(int numOfChunksPerBlock, int i, List chunks, KeyValueContainer container, BlockID blockID) { - long chunkLength = 100; + final long chunkLength = 100; try { for (int k = 0; k < numOfChunksPerBlock; k++) { final String chunkName = String.format("%d_chunk_%d_block_%d", @@ -199,11 +213,10 @@ private void putChunksInBlock(int numOfChunksPerBlock, int i, .setChecksumData(Checksum.getNoChecksumDataProto()).build(); chunks.add(info); ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength); - final ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); - chunkManager - .writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + try (ChunkBuffer chunkData = ChunkBuffer.allocate((int) chunkLength)) { + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, WRITE_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, COMMIT_STAGE); + } } } catch (IOException ex) { LOG.warn("Putting chunks in blocks was not successful for BlockID: " diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 1363dc2269a4..2b13daaca291 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -237,6 +237,8 @@ public void testReleaseBuffers() throws Exception { assertThat(watcher.getFutureMap()).isEmpty(); assertThat(watcher.getCommitIndexMap()).isEmpty(); } + } finally { + bufferPool.clearBufferPool(); } } @@ -330,6 +332,8 @@ public void testReleaseBuffersOnException() throws Exception { assertThat(watcher.getCommitIndexMap()).isEmpty(); } } + } finally { + bufferPool.clearBufferPool(); } } } From d3e2e59c1c78866165819fcc6774631316f2e586 Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Fri, 9 Feb 2024 23:33:20 +0530 Subject: [PATCH 010/108] HDDS-10319. 
Also consider bucket layout deciding whether to normalize path for listKeys (#6195) --- .../ozone/freon/TestOmBucketReadWriteKeyOps.java | 13 ++++++++----- .../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 10 +++++++--- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java index b74022b83e5d..3c7a04071b3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOmBucketReadWriteKeyOps.java @@ -34,7 +34,8 @@ import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.raftlog.RaftLog; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; @@ -88,8 +89,9 @@ private void shutdown() { * * @throws IOException */ - private void startCluster() throws Exception { + private void startCluster(boolean fsPathsEnabled) throws Exception { conf = getOzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, fsPathsEnabled); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.OBJECT_STORE.name()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); @@ -104,10 +106,11 @@ private OzoneConfiguration getOzoneConfiguration() { return new OzoneConfiguration(); } - @Test - public void testOmBucketReadWriteKeyOps() throws Exception { + @ParameterizedTest(name = "Filesystem Paths Enabled: {0}") + @ValueSource(booleans = {false, true}) + public void testOmBucketReadWriteKeyOps(boolean fsPathsEnabled) throws Exception { try { - startCluster(); + startCluster(fsPathsEnabled); FileOutputStream out = FileUtils.openOutputStream(new File(path, "conf")); cluster.getConf().writeXml(out); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index a84fdaf1a61c..d932ed1eff58 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -609,13 +609,17 @@ public ListKeysResult listKeys(String volumeName, String bucketName, int maxKeys) throws IOException { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - + OmBucketInfo omBucketInfo = getBucketInfo(volumeName, bucketName); + if (omBucketInfo == null) { + throw new OMException("Bucket " + bucketName + " not found.", + ResultCodes.BUCKET_NOT_FOUND); + } + BucketLayout bucketLayout = omBucketInfo.getBucketLayout(); // We don't take a lock in this path, since we walk the // underlying table using an iterator. That automatically creates a // snapshot of the data, so we don't need these locks at a higher level // when we iterate. 
- - if (enableFileSystemPaths) { + if (bucketLayout.shouldNormalizePaths(enableFileSystemPaths)) { startKey = OmUtils.normalizeKey(startKey, true); keyPrefix = OmUtils.normalizeKey(keyPrefix, true); } From 2f2234c7b61714404399ada8f31b3fb4772b613a Mon Sep 17 00:00:00 2001 From: Cyrill Date: Fri, 9 Feb 2024 22:04:16 +0300 Subject: [PATCH 011/108] HDDS-10262. Encapsulate SnapshotCache inside OmSnapshotManager (#6135) --- .../hadoop/ozone/freon/TestOMSnapshotDAG.java | 41 +++---- .../om/TestSnapshotBackgroundServices.java | 16 +-- .../ozone/om/TestSnapshotDeletingService.java | 23 ++-- .../ozone/om/snapshot/TestOmSnapshot.java | 4 +- .../ozone/om/OmMetadataManagerImpl.java | 19 ++- .../hadoop/ozone/om/OmSnapshotManager.java | 110 +++++++++++++----- .../apache/hadoop/ozone/om/OzoneManager.java | 47 +++----- .../hadoop/ozone/om/SstFilteringService.java | 18 +-- .../ozone/om/request/OMClientRequest.java | 5 +- .../snapshot/OMSnapshotPurgeRequest.java | 3 +- .../OMDirectoriesPurgeResponseWithFSO.java | 12 +- .../om/response/key/OMKeyPurgeResponse.java | 12 +- .../OMSnapshotMoveDeletedKeysResponse.java | 21 ++-- .../om/service/DirectoryDeletingService.java | 10 +- .../ozone/om/service/KeyDeletingService.java | 32 ++--- .../om/service/SnapshotDeletingService.java | 23 ++-- .../SnapshotDirectoryCleaningService.java | 32 ++--- .../ozone/om/snapshot/ReferenceCounted.java | 6 +- .../ozone/om/snapshot/SnapshotCache.java | 46 +++----- .../om/snapshot/SnapshotDiffManager.java | 24 ++-- .../ozone/om/TestOmSnapshotManager.java | 12 +- .../ozone/om/TestSstFilteringService.java | 12 +- .../key/TestOMKeyPurgeRequestAndResponse.java | 12 +- .../om/request/key/TestOMKeyRequest.java | 3 +- .../s3/multipart/TestS3MultipartRequest.java | 3 +- .../snapshot/TestOMSnapshotDeleteRequest.java | 3 - ...TestOMSnapshotPurgeRequestAndResponse.java | 3 +- .../om/service/TestKeyDeletingService.java | 10 +- .../ozone/om/snapshot/TestSnapshotCache.java | 50 +++----- .../om/snapshot/TestSnapshotDiffManager.java | 76 ++++++------ 30 files changed, 304 insertions(+), 384 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index bca21aebd1ac..c566cae414fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -29,7 +29,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -39,7 +38,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import org.apache.ozone.test.GenericTestUtils; @@ -215,20 +213,16 @@ public void testDAGReconstruction() OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); RDBStore rdbStore = (RDBStore) omMetadataManager.getStore(); RocksDBCheckpointDiffer differ = 
rdbStore.getRocksDBCheckpointDiffer(); - ReferenceCounted - snapDB1 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap1")); - ReferenceCounted - snapDB2 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap2")); + ReferenceCounted snapDB1 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap1"); + ReferenceCounted snapDB2 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap2"); DifferSnapshotInfo snap1 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap1", - ((RDBStore)((OmSnapshot)snapDB1.get()) + ((RDBStore) snapDB1.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); DifferSnapshotInfo snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore)((OmSnapshot)snapDB2.get()) + volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); // RocksDB does checkpointing in a separate thread, wait for it @@ -247,13 +241,11 @@ public void testDAGReconstruction() resp = store.createSnapshot(volumeName, bucketName, "snap3"); LOG.debug("Snapshot created: {}", resp); - ReferenceCounted - snapDB3 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap3")); + ReferenceCounted snapDB3 = ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, "snap3"); DifferSnapshotInfo snap3 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap3", - ((RDBStore)((OmSnapshot)snapDB3.get()) + ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); final File checkpointSnap3 = new File(snap3.getDbPath()); GenericTestUtils.waitFor(checkpointSnap3::exists, 2000, 20000); @@ -274,24 +266,21 @@ public void testDAGReconstruction() ozoneManager = cluster.getOzoneManager(); omMetadataManager = ozoneManager.getMetadataManager(); snapDB1 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap1")); + .getActiveSnapshot(volumeName, bucketName, "snap1"); snapDB2 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap2")); + .getActiveSnapshot(volumeName, bucketName, "snap2"); snap1 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap1", - ((RDBStore)((OmSnapshot)snapDB1.get()) + ((RDBStore) snapDB1.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); snap2 = getDifferSnapshotInfo(omMetadataManager, - volumeName, bucketName, "snap2", ((RDBStore)((OmSnapshot)snapDB2.get()) + volumeName, bucketName, "snap2", ((RDBStore) snapDB2.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); snapDB3 = ozoneManager.getOmSnapshotManager() - .getSnapshotCache().get( - SnapshotInfo.getTableKey(volumeName, bucketName, "snap3")); + .getActiveSnapshot(volumeName, bucketName, "snap3"); snap3 = getDifferSnapshotInfo(omMetadataManager, volumeName, bucketName, "snap3", - ((RDBStore)((OmSnapshot)snapDB3.get()) + ((RDBStore) snapDB3.get() .getMetadataManager().getStore()).getDb().getManagedRocksDb()); List sstDiffList21Run2 = differ.getSSTDiffList(snap2, snap1); assertEquals(sstDiffList21, sstDiffList21Run2); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java index a7bc55446413..83386693d7dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java @@ -41,7 +41,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; import org.apache.ozone.compaction.log.CompactionLogEntry; @@ -76,7 +75,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithStoppedNodes.createKey; import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -259,12 +257,11 @@ public void testSnapshotAndKeyDeletionBackgroundServices() // get snapshot c OmSnapshot snapC; - try (ReferenceCounted rcC = newLeaderOM + try (ReferenceCounted rcC = newLeaderOM .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfoC.getName()), true)) { + .getSnapshot(volumeName, bucketName, snapshotInfoC.getName())) { assertNotNull(rcC); - snapC = (OmSnapshot) rcC.get(); + snapC = rcC.get(); } // assert that key a is in snapshot c's deleted table @@ -284,12 +281,11 @@ public void testSnapshotAndKeyDeletionBackgroundServices() // get snapshot d OmSnapshot snapD; - try (ReferenceCounted rcD = newLeaderOM + try (ReferenceCounted rcD = newLeaderOM .getOmSnapshotManager() - .checkForSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfoD.getName()), true)) { + .getSnapshot(volumeName, bucketName, snapshotInfoD.getName())) { assertNotNull(rcD); - snapD = (OmSnapshot) rcD.get(); + snapD = rcD.get(); } // wait until key a appears in deleted table of snapshot d diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index e627a880fd21..9f697d4148b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -58,7 +57,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static 
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -133,9 +131,8 @@ public void testSnapshotSplitAndMove() throws Exception { GenericTestUtils.waitFor(() -> snapshotDeletingService .getSuccessfulRunCount() >= 1, 1000, 10000); - OmSnapshot bucket1snap3 = (OmSnapshot) om.getOmSnapshotManager() - .checkForSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, - getSnapshotPrefix("bucket1snap3"), true).get(); + OmSnapshot bucket1snap3 = om.getOmSnapshotManager() + .getSnapshot(VOLUME_NAME, BUCKET_NAME_ONE, "bucket1snap3").get(); // Check bucket1key1 added to next non deleted snapshot db. List> omKeyInfos = @@ -190,8 +187,7 @@ public void testMultipleSnapshotKeyReclaim() throws Exception { // verify the cache of purged snapshot // /vol1/bucket2/bucket2snap1 has been cleaned up from cache map - SnapshotCache snapshotCache = om.getOmSnapshotManager().getSnapshotCache(); - assertEquals(2, snapshotCache.size()); + assertEquals(2, om.getOmSnapshotManager().getSnapshotCacheSize()); } @SuppressWarnings("checkstyle:MethodLength") @@ -359,9 +355,8 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(om.getMetadataManager().getSnapshotInfoTable(), 2); verifySnapshotChain(deletedSnap, "/vol1/bucket2/snap3"); - OmSnapshot snap3 = (OmSnapshot) om.getOmSnapshotManager() - .checkForSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, - getSnapshotPrefix("snap3"), true).get(); + OmSnapshot snap3 = om.getOmSnapshotManager() + .getSnapshot(VOLUME_NAME, BUCKET_NAME_TWO, "snap3").get(); Table snapDeletedDirTable = snap3.getMetadataManager().getDeletedDirTable(); @@ -388,10 +383,10 @@ public void testSnapshotWithFSO() throws Exception { assertTableRowCount(renamedTable, 4); assertTableRowCount(deletedDirTable, 3); - ReferenceCounted rcSnap1 = - om.getOmSnapshotManager().checkForSnapshot( - VOLUME_NAME, BUCKET_NAME_TWO, getSnapshotPrefix("snap1"), true); - OmSnapshot snap1 = (OmSnapshot) rcSnap1.get(); + ReferenceCounted rcSnap1 = + om.getOmSnapshotManager().getSnapshot( + VOLUME_NAME, BUCKET_NAME_TWO, "snap1"); + OmSnapshot snap1 = rcSnap1.get(); Table snap1KeyTable = snap1.getMetadataManager().getFileTable(); try (TableIterator + try (ReferenceCounted rcLatestSnapshot = getLatestActiveSnapshot( keySplit[1], keySplit[2], omSnapshotManager)) { @@ -1573,13 +1571,12 @@ public PendingKeysDeletion getPendingDeletionKeys(final int keyCount, if (rcLatestSnapshot != null) { Table prevKeyTable = - ((OmSnapshot) rcLatestSnapshot.get()) + rcLatestSnapshot.get() .getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); Table prevDeletedTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDeletedTable(); + rcLatestSnapshot.get().getMetadataManager().getDeletedTable(); String prevKeyTableDBKey = getSnapshotRenamedTable() .get(dbRenameKey); String prevDelTableDBKey = getOzoneKey(info.getVolumeName(), @@ -1665,8 +1662,7 @@ private boolean versionExistsInPreviousSnapshot(OmKeyInfo omKeyInfo, /** * Get the latest OmSnapshot for a snapshot path. 
*/ - public ReferenceCounted< - IOmMetadataReader, SnapshotCache> getLatestActiveSnapshot( + public ReferenceCounted getLatestActiveSnapshot( String volumeName, String bucketName, OmSnapshotManager snapshotManager) throws IOException { @@ -1700,13 +1696,12 @@ IOmMetadataReader, SnapshotCache> getLatestActiveSnapshot( } } - Optional> rcOmSnapshot = + Optional> rcOmSnapshot = snapshotInfo.isPresent() ? Optional.ofNullable( - snapshotManager.checkForSnapshot(volumeName, + snapshotManager.getSnapshot(volumeName, bucketName, - getSnapshotPrefix(snapshotInfo.get().getName()), - true) + snapshotInfo.get().getName()) ) : Optional.empty(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 2dab56ede67b..e22d6b309739 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -271,10 +271,10 @@ public OmSnapshotManager(OzoneManager ozoneManager) { }; // Init snapshot cache - this.snapshotCache = new SnapshotCache(this, loader, softCacheSize); + this.snapshotCache = new SnapshotCache(loader, softCacheSize); this.snapshotDiffManager = new SnapshotDiffManager(snapshotDiffDb, differ, - ozoneManager, snapshotCache, snapDiffJobCf, snapDiffReportCf, + ozoneManager, snapDiffJobCf, snapDiffReportCf, columnFamilyOptions, codecRegistry); diffCleanupServiceInterval = ozoneManager.getConfiguration() @@ -397,11 +397,32 @@ private static CodecRegistry createCodecRegistryForSnapDiff() { } /** - * Get snapshot instance LRU cache. - * @return LoadingCache + * Get snapshot instance LRU cache size. + * @return cache size. */ - public SnapshotCache getSnapshotCache() { - return snapshotCache; + @VisibleForTesting + public int getSnapshotCacheSize() { + return snapshotCache == null ? 0 : snapshotCache.size(); + } + + /** + * Immediately invalidate all entries and close their DB instances in cache. + */ + public void invalidateCache() { + if (snapshotCache != null) { + snapshotCache.invalidateAll(); + } + } + + /** + * Immediately invalidate an entry. + * + * @param key DB snapshot table key + */ + public void invalidateCacheEntry(String key) throws IOException { + if (snapshotCache != null) { + snapshotCache.invalidate(key); + } } /** @@ -590,11 +611,11 @@ private static void deleteKeysFromDelKeyTableInSnapshotScope( } // Get OmSnapshot if the keyName has ".snapshot" key indicator - public ReferenceCounted checkForSnapshot( + @SuppressWarnings("unchecked") + public ReferenceCounted getActiveFsMetadataOrSnapshot( String volumeName, String bucketName, - String keyName, - boolean skipActiveCheck) throws IOException { + String keyName) throws IOException { if (keyName == null || !ozoneManager.isFilesystemSnapshotEnabled()) { return ozoneManager.getOmMetadataReader(); } @@ -603,31 +624,58 @@ public ReferenceCounted checkForSnapshot( String[] keyParts = keyName.split(OM_KEY_PREFIX); if (isSnapshotKey(keyParts)) { String snapshotName = keyParts[1]; - if (snapshotName == null || snapshotName.isEmpty()) { - // don't allow snapshot indicator without snapshot name - throw new OMException(INVALID_KEY_NAME); - } - String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, - bucketName, snapshotName); - - // Block FS API reads when snapshot is not active. 
- if (!skipActiveCheck) { - checkSnapshotActive(ozoneManager, snapshotTableKey); - } - // Warn if actual cache size exceeds the soft limit already. - if (snapshotCache.size() > softCacheSize) { - LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", - snapshotCache.size(), softCacheSize); - } - - // retrieve the snapshot from the cache - return snapshotCache.get(snapshotTableKey, skipActiveCheck); + return (ReferenceCounted) (ReferenceCounted) + getActiveSnapshot(volumeName, bucketName, snapshotName); } else { return ozoneManager.getOmMetadataReader(); } } + public ReferenceCounted getActiveSnapshot( + String volumeName, + String bucketName, + String snapshotName) throws IOException { + return getSnapshot(volumeName, bucketName, snapshotName, false); + } + + public ReferenceCounted getSnapshot( + String volumeName, + String bucketName, + String snapshotName) throws IOException { + return getSnapshot(volumeName, bucketName, snapshotName, true); + } + + private ReferenceCounted getSnapshot( + String volumeName, + String bucketName, + String snapshotName, + boolean skipActiveCheck) throws IOException { + + if (snapshotName == null || snapshotName.isEmpty()) { + // don't allow snapshot indicator without snapshot name + throw new OMException(INVALID_KEY_NAME); + } + + String snapshotTableKey = SnapshotInfo.getTableKey(volumeName, + bucketName, snapshotName); + + return getSnapshot(snapshotTableKey, skipActiveCheck); + } + + private ReferenceCounted getSnapshot( + String snapshotTableKey, + boolean skipActiveCheck) throws IOException { + + // Block FS API reads when snapshot is not active. + if (!skipActiveCheck) { + checkSnapshotActive(ozoneManager, snapshotTableKey); + } + + // retrieve the snapshot from the cache + return snapshotCache.get(snapshotTableKey); + } + /** * Returns true if the snapshot is in given status. * @param key DB snapshot table key @@ -894,9 +942,9 @@ public void close() { if (snapshotDiffManager != null) { snapshotDiffManager.close(); } - if (snapshotCache != null) { - snapshotCache.invalidateAll(); - } + + invalidateCache(); + if (snapshotDiffCleanupService != null) { snapshotDiffCleanupService.shutdown(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 410e08a4db54..c4e9eb2ed3e2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -100,7 +100,6 @@ import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.security.acl.OzoneAuthorizerFactory; import org.apache.hadoop.ozone.snapshot.CancelSnapshotDiffResponse; @@ -485,7 +484,7 @@ private enum State { private OmMetadataReader omMetadataReader; // Wrap active DB metadata reader in ReferenceCounted once to avoid // instance creation every single time. 
- private ReferenceCounted rcOmMetadataReader; + private ReferenceCounted rcOmMetadataReader; private OmSnapshotManager omSnapshotManager; @SuppressWarnings("methodlength") @@ -2580,8 +2579,7 @@ public boolean getAllowListAllVolumes() { return allowListAllVolumes; } - public ReferenceCounted< - IOmMetadataReader, SnapshotCache> getOmMetadataReader() { + public ReferenceCounted getOmMetadataReader() { return rcOmMetadataReader; } @@ -2851,8 +2849,7 @@ public OmBucketInfo getBucketInfo(String volume, String bucket) */ @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { - try (ReferenceCounted - rcReader = getReader(args)) { + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().lookupKey(args); } } @@ -2864,8 +2861,7 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, boolean assumeS3Context) throws IOException { - try (ReferenceCounted rcReader = - getReader(args)) { + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().getKeyInfo(args, assumeS3Context); } } @@ -2877,7 +2873,7 @@ public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, public ListKeysResult listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(volumeName, bucketName, keyPrefix)) { return rcReader.get().listKeys( volumeName, bucketName, startKey, keyPrefix, maxKeys); @@ -3629,7 +3625,7 @@ public OmMultipartUploadList listMultipartUploads(String volumeName, */ @Override public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().getFileStatus(args); } @@ -3640,7 +3636,7 @@ public OzoneFileStatus getFileStatus(OmKeyArgs args) throws IOException { */ @Override public OmKeyInfo lookupFile(OmKeyArgs args) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().lookupFile(args); } @@ -3659,7 +3655,7 @@ public List listStatus(OmKeyArgs args, boolean recursive, public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(args)) { return rcReader.get().listStatus( args, recursive, startKey, numEntries, allowPartialPrefixes); @@ -3683,7 +3679,7 @@ public List listStatusLight(OmKeyArgs args, */ @Override public List getAcl(OzoneObj obj) throws IOException { - try (ReferenceCounted rcReader = + try (ReferenceCounted rcReader = getReader(obj)) { return rcReader.get().getAcl(obj); } @@ -3751,7 +3747,7 @@ TermIndex installCheckpoint(String leaderId, Path checkpointLocation, keyManager.stop(); stopSecretManager(); stopTrashEmptier(); - omSnapshotManager.getSnapshotCache().invalidateAll(); + omSnapshotManager.invalidateCache(); // Pause the State Machine so that no new transactions can be applied. // This action also clears the OM Double Buffer so that if there are any // pending transactions in the buffer, they are discarded. 
@@ -4705,12 +4701,10 @@ public static HddsProtos.OzoneManagerDetailsProto getOmDetailsProto( * @param keyArgs OmKeyArgs * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader(OmKeyArgs keyArgs) + private ReferenceCounted getReader(OmKeyArgs keyArgs) throws IOException { - return omSnapshotManager.checkForSnapshot( - keyArgs.getVolumeName(), keyArgs.getBucketName(), keyArgs.getKeyName(), - false); + return omSnapshotManager.getActiveFsMetadataOrSnapshot( + keyArgs.getVolumeName(), keyArgs.getBucketName(), keyArgs.getKeyName()); } /** @@ -4722,11 +4716,10 @@ IOmMetadataReader, SnapshotCache> getReader(OmKeyArgs keyArgs) * @param key key path * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader( + private ReferenceCounted getReader( String volumeName, String bucketName, String key) throws IOException { - return omSnapshotManager.checkForSnapshot( - volumeName, bucketName, key, false); + return omSnapshotManager.getActiveFsMetadataOrSnapshot( + volumeName, bucketName, key); } /** @@ -4736,14 +4729,12 @@ IOmMetadataReader, SnapshotCache> getReader( * @param ozoneObj OzoneObj * @return ReferenceCounted */ - private ReferenceCounted< - IOmMetadataReader, SnapshotCache> getReader(OzoneObj ozoneObj) + private ReferenceCounted getReader(OzoneObj ozoneObj) throws IOException { - return omSnapshotManager.checkForSnapshot( + return omSnapshotManager.getActiveFsMetadataOrSnapshot( ozoneObj.getVolumeName(), ozoneObj.getBucketName(), - ozoneObj.getKeyName(), - false); + ozoneObj.getKeyName()); } @SuppressWarnings("parameternumber") diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index cae9bc4b3fca..20d0ab0e53eb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -147,10 +146,9 @@ private void markSSTFilteredFlagForSnapshot(String volume, String bucket, @Override public BackgroundTaskResult call() throws Exception { - Optional snapshotCache = Optional.ofNullable(ozoneManager) - .map(OzoneManager::getOmSnapshotManager) - .map(OmSnapshotManager::getSnapshotCache); - if (!snapshotCache.isPresent()) { + Optional snapshotManager = Optional.ofNullable(ozoneManager) + .map(OzoneManager::getOmSnapshotManager); + if (!snapshotManager.isPresent()) { return BackgroundTaskResult.EmptyTaskResult.newResult(); } Table snapshotInfoTable = @@ -183,10 +181,12 @@ public BackgroundTaskResult call() throws Exception { snapshotInfo.getBucketName()); try ( - ReferenceCounted - snapshotMetadataReader = snapshotCache.get().get( - snapshotInfo.getTableKey())) { - OmSnapshot omSnapshot = (OmSnapshot) snapshotMetadataReader.get(); + ReferenceCounted snapshotMetadataReader = + snapshotManager.get().getActiveSnapshot( + snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), + snapshotInfo.getName())) { + OmSnapshot omSnapshot = snapshotMetadataReader.get(); RDBStore rdbStore = (RDBStore) 
omSnapshot.getMetadataManager() .getStore(); RocksDatabase db = rdbStore.getDb(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index 2698d12f9f89..d0dd2caa54a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -42,7 +42,6 @@ import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.LayoutVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -296,7 +295,7 @@ protected void checkACLsWithFSO(OzoneManager ozoneManager, String volumeName, contextBuilder.setOwnerName(bucketOwner); } - try (ReferenceCounted rcMetadataReader = + try (ReferenceCounted rcMetadataReader = ozoneManager.getOmMetadataReader()) { OmMetadataReader omMetadataReader = (OmMetadataReader) rcMetadataReader.get(); @@ -362,7 +361,7 @@ public void checkAcls(OzoneManager ozoneManager, String bucketOwner) throws IOException { - try (ReferenceCounted rcMetadataReader = + try (ReferenceCounted rcMetadataReader = ozoneManager.getOmMetadataReader()) { OzoneAclUtils.checkAllAcls((OmMetadataReader) rcMetadataReader.get(), resType, storeType, aclType, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index b7dba8260269..1533ceebe336 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -91,8 +91,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn trxnLogIndex, updatedSnapInfos); updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); - ozoneManager.getOmSnapshotManager().getSnapshotCache() - .invalidate(snapTableKey); + ozoneManager.getOmSnapshotManager().invalidateCacheEntry(snapTableKey); } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java index bb9562dff21a..848c5c308906 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMDirectoriesPurgeResponseWithFSO.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -36,7 +35,6 @@ import org.apache.hadoop.ozone.om.request.key.OMDirectoriesPurgeRequestWithFSO; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; @@ -50,7 +48,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Response for {@link OMDirectoriesPurgeRequestWithFSO} request. @@ -86,13 +83,12 @@ public void addToDBBatch(OMMetadataManager metadataManager, ((OmMetadataManagerImpl) metadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted - rcFromSnapshotInfo = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcFromSnapshotInfo = omSnapshotManager.getSnapshot( fromSnapshotInfo.getVolumeName(), fromSnapshotInfo.getBucketName(), - getSnapshotPrefix(fromSnapshotInfo.getName()), - true)) { - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshotInfo.get(); + fromSnapshotInfo.getName())) { + OmSnapshot fromSnapshot = rcFromSnapshotInfo.get(); DBStore fromSnapshotStore = fromSnapshot.getMetadataManager() .getStore(); // Init Batch Operation for snapshot db. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index 4e9ee7563310..b16ba95d78f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -29,7 +28,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; @@ -41,7 +39,6 @@ import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotMoveDeletedKeysResponse.createRepeatedOmKeyInfo; /** @@ -81,14 +78,13 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted 
rcOmFromSnapshot = - omSnapshotManager.checkForSnapshot( + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot( fromSnapshot.getVolumeName(), fromSnapshot.getBucketName(), - getSnapshotPrefix(fromSnapshot.getName()), - true)) { + fromSnapshot.getName())) { - OmSnapshot fromOmSnapshot = (OmSnapshot) rcOmFromSnapshot.get(); + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); DBStore fromSnapshotStore = fromOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java index 1255e4ae7f41..3726faacfd70 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotMoveDeletedKeysResponse.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -32,7 +31,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -42,7 +40,6 @@ import java.util.List; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Response for OMSnapshotMoveDeletedKeysRequest. @@ -93,24 +90,22 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager(); - try (ReferenceCounted rcOmFromSnapshot = - omSnapshotManager.checkForSnapshot( + try (ReferenceCounted rcOmFromSnapshot = + omSnapshotManager.getSnapshot( fromSnapshot.getVolumeName(), fromSnapshot.getBucketName(), - getSnapshotPrefix(fromSnapshot.getName()), - true)) { + fromSnapshot.getName())) { - OmSnapshot fromOmSnapshot = (OmSnapshot) rcOmFromSnapshot.get(); + OmSnapshot fromOmSnapshot = rcOmFromSnapshot.get(); if (nextSnapshot != null) { - try (ReferenceCounted - rcOmNextSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcOmNextSnapshot = omSnapshotManager.getSnapshot( nextSnapshot.getVolumeName(), nextSnapshot.getBucketName(), - getSnapshotPrefix(nextSnapshot.getName()), - true)) { + nextSnapshot.getName())) { - OmSnapshot nextOmSnapshot = (OmSnapshot) rcOmNextSnapshot.get(); + OmSnapshot nextOmSnapshot = rcOmNextSnapshot.get(); RDBStore nextSnapshotStore = (RDBStore) nextOmSnapshot.getMetadataManager().getStore(); // Init Batch Operation for snapshot db. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java index 9643fa82969c..d7205b2c1bbf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -35,7 +34,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.util.Time; import org.apache.ratis.protocol.ClientId; @@ -238,7 +236,7 @@ private boolean previousSnapshotHasDir( OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) getOzoneManager().getMetadataManager(); - try (ReferenceCounted rcLatestSnapshot = + try (ReferenceCounted rcLatestSnapshot = metadataManager.getLatestActiveSnapshot( deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), @@ -249,11 +247,9 @@ private boolean previousSnapshotHasDir( .getRenameKey(deletedDirInfo.getVolumeName(), deletedDirInfo.getBucketName(), deletedDirInfo.getObjectID()); Table prevDirTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDirectoryTable(); + rcLatestSnapshot.get().getMetadataManager().getDirectoryTable(); Table prevDeletedDirTable = - ((OmSnapshot) rcLatestSnapshot.get()) - .getMetadataManager().getDeletedDirTable(); + rcLatestSnapshot.get().getMetadataManager().getDeletedDirTable(); OmKeyInfo prevDeletedDirInfo = prevDeletedDirTable.get(key); if (prevDeletedDirInfo != null) { return true; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index e89608e82db2..83991668c9f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -37,7 +37,6 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -46,7 +45,6 @@ import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; @@ -58,7 +56,6 @@ import com.google.common.annotations.VisibleForTesting; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; @@ -264,13 +261,12 @@ private void processSnapshotDeepClean(int delCount) continue; } - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.getSnapshot( currSnapInfo.getVolumeName(), currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - true)) { - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + currSnapInfo.getName())) { + OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); Table snapDeletedTable = currOmSnapshot.getMetadataManager().getDeletedTable(); @@ -304,18 +300,16 @@ private void processSnapshotDeepClean(int delCount) Table previousKeyTable = null; Table prevRenamedTable = null; - ReferenceCounted - rcPrevOmSnapshot = null; + ReferenceCounted rcPrevOmSnapshot = null; // Split RepeatedOmKeyInfo and update current snapshot // deletedKeyTable and next snapshot deletedKeyTable. if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevOmSnapshot = omSnapshotManager.getSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), true); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); + previousSnapshot.getName()); + OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); previousKeyTable = omPreviousSnapshot.getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); @@ -324,15 +318,13 @@ private void processSnapshotDeepClean(int delCount) } Table previousToPrevKeyTable = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; + ReferenceCounted rcPrevToPrevOmSnapshot = null; if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevToPrevOmSnapshot = omSnapshotManager.getSnapshot( previousToPrevSnapshot.getVolumeName(), previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), true); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); + previousToPrevSnapshot.getName()); + OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); previousToPrevKeyTable = omPreviousToPrevSnapshot .getMetadataManager() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index cc275b4e8e6a..29b2b319532b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.KeyManagerImpl; @@ -52,7 +51,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgePathRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveDeletedKeysRequest; @@ -78,7 +76,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; /** * Background Service to clean-up deleted snapshot and reclaim space. @@ -143,10 +140,8 @@ public BackgroundTaskResult call() throws InterruptedException { getRunCount().incrementAndGet(); - ReferenceCounted rcOmSnapshot = - null; - ReferenceCounted rcOmPreviousSnapshot = - null; + ReferenceCounted rcOmSnapshot = null; + ReferenceCounted rcOmPreviousSnapshot = null; Table snapshotInfoTable = ozoneManager.getMetadataManager().getSnapshotInfoTable(); @@ -169,12 +164,11 @@ public BackgroundTaskResult call() throws InterruptedException { // Note: Can refactor this to use try-with-resources. // Handling RC decrements manually for now to minimize conflicts. - rcOmSnapshot = omSnapshotManager.checkForSnapshot( + rcOmSnapshot = omSnapshotManager.getSnapshot( snapInfo.getVolumeName(), snapInfo.getBucketName(), - getSnapshotPrefix(snapInfo.getName()), - true); - OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get(); + snapInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); Table snapshotDeletedTable = omSnapshot.getMetadataManager().getDeletedTable(); @@ -226,12 +220,11 @@ public BackgroundTaskResult call() throws InterruptedException { // Split RepeatedOmKeyInfo and update current snapshot deletedKeyTable // and next snapshot deletedKeyTable. 
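As the note in the hunk above says, SnapshotDeletingService keeps manual reference handling instead of try-with-resources to minimize conflicts. A sketch of that manual pattern, with illustrative names and the same assumed type argument as before; releasing via close() in a finally block is one way to do it, not necessarily the exact shape of the committed code:

    ReferenceCounted<OmSnapshot> rcOmSnapshot = null;
    try {
      rcOmSnapshot = omSnapshotManager.getSnapshot(
          snapInfo.getVolumeName(), snapInfo.getBucketName(), snapInfo.getName());
      OmSnapshot omSnapshot = rcOmSnapshot.get();
      // move the snapshot's deleted keys, acquiring the previous snapshot the same
      // way when one exists
    } finally {
      if (rcOmSnapshot != null) {
        rcOmSnapshot.close();   // manual equivalent of leaving a try-with-resources block
      }
    }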
if (previousSnapshot != null) { - rcOmPreviousSnapshot = omSnapshotManager.checkForSnapshot( + rcOmPreviousSnapshot = omSnapshotManager.getSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), - true); - omPreviousSnapshot = (OmSnapshot) rcOmPreviousSnapshot.get(); + previousSnapshot.getName()); + omPreviousSnapshot = rcOmPreviousSnapshot.get(); previousKeyTable = omPreviousSnapshot .getMetadataManager().getKeyTable(bucketInfo.getBucketLayout()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java index 9a60f6303861..fe0f6e111ed3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -44,7 +43,6 @@ import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; @@ -63,7 +61,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; @@ -158,10 +155,8 @@ public BackgroundTaskResult call() { continue; } - ReferenceCounted - rcPrevOmSnapshot = null; - ReferenceCounted - rcPrevToPrevOmSnapshot = null; + ReferenceCounted rcPrevOmSnapshot = null; + ReferenceCounted rcPrevToPrevOmSnapshot = null; try { long volumeId = metadataManager .getVolumeId(currSnapInfo.getVolumeName()); @@ -189,12 +184,11 @@ public BackgroundTaskResult call() { Table prevRenamedTable = null; if (previousSnapshot != null) { - rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + rcPrevOmSnapshot = omSnapshotManager.getActiveSnapshot( previousSnapshot.getVolumeName(), previousSnapshot.getBucketName(), - getSnapshotPrefix(previousSnapshot.getName()), false); - OmSnapshot omPreviousSnapshot = (OmSnapshot) - rcPrevOmSnapshot.get(); + previousSnapshot.getName()); + OmSnapshot omPreviousSnapshot = rcPrevOmSnapshot.get(); previousKeyTable = omPreviousSnapshot.getMetadataManager() .getKeyTable(bucketInfo.getBucketLayout()); @@ -206,12 +200,11 @@ public BackgroundTaskResult call() { Table previousToPrevKeyTable = null; if (previousToPrevSnapshot != null) { - rcPrevToPrevOmSnapshot = 
omSnapshotManager.checkForSnapshot( + rcPrevToPrevOmSnapshot = omSnapshotManager.getActiveSnapshot( previousToPrevSnapshot.getVolumeName(), previousToPrevSnapshot.getBucketName(), - getSnapshotPrefix(previousToPrevSnapshot.getName()), false); - OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) - rcPrevToPrevOmSnapshot.get(); + previousToPrevSnapshot.getName()); + OmSnapshot omPreviousToPrevSnapshot = rcPrevToPrevOmSnapshot.get(); previousToPrevKeyTable = omPreviousToPrevSnapshot .getMetadataManager() @@ -220,14 +213,13 @@ public BackgroundTaskResult call() { String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager, currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); - try (ReferenceCounted - rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.getActiveSnapshot( currSnapInfo.getVolumeName(), currSnapInfo.getBucketName(), - getSnapshotPrefix(currSnapInfo.getName()), - false)) { + currSnapInfo.getName())) { - OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + OmSnapshot currOmSnapshot = rcCurrOmSnapshot.get(); Table snapDeletedDirTable = currOmSnapshot.getMetadataManager().getDeletedDirTable(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java index 808a5ed4c192..0a9d47fc861c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/ReferenceCounted.java @@ -25,7 +25,7 @@ /** * Add reference counter to an object instance. */ -public class ReferenceCounted +public class ReferenceCounted implements AutoCloseable { /** @@ -51,10 +51,10 @@ public class ReferenceCounted /** * Parent instance whose callback will be triggered upon this RC closure. */ - private final U parentWithCallback; + private final Object parentWithCallback; public ReferenceCounted(T obj, boolean disableCounter, - U parentWithCallback) { + Object parentWithCallback) { // A param to allow disabling ref counting to reduce active DB // access penalties due to AtomicLong operations. this.obj = obj; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index 226acbb7dd1b..e776968fcaf4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -19,9 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheLoader; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -32,7 +30,6 @@ import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; /** * Thread-safe custom unbounded LRU cache to manage open snapshot DB instances. 
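Two accessors replace the old boolean flag on checkForSnapshot(): paths that passed true (skip the active-status check), such as the key and snapshot deletion services above, now call getSnapshot(), while paths that passed false, such as SnapshotDirectoryCleaningService and the snapshot-diff and test code further down, call getActiveSnapshot(), which is expected to reject snapshots that are no longer SNAPSHOT_ACTIVE now that SnapshotCache.get() itself drops that check (see the hunks just below). A short contrast, with illustrative arguments:

    // Purge and deep-clean paths: the snapshot may already be marked deleted,
    // but its DB must stay readable.
    omSnapshotManager.getSnapshot(volumeName, bucketName, snapshotName);

    // Read paths such as snapshot diff: only an active snapshot is acceptable.
    omSnapshotManager.getActiveSnapshot(volumeName, bucketName, snapshotName);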
@@ -45,26 +42,23 @@ public class SnapshotCache { // Key: DB snapshot table key // Value: OmSnapshot instance, each holds a DB instance handle inside // TODO: [SNAPSHOT] Consider wrapping SoftReference<> around IOmMetadataReader - private final ConcurrentHashMap> dbMap; + private final ConcurrentHashMap> dbMap; - private final OmSnapshotManager omSnapshotManager; private final CacheLoader cacheLoader; // Soft-limit of the total number of snapshot DB instances allowed to be // opened on the OM. private final int cacheSizeLimit; public SnapshotCache( - OmSnapshotManager omSnapshotManager, CacheLoader cacheLoader, int cacheSizeLimit) { this.dbMap = new ConcurrentHashMap<>(); - this.omSnapshotManager = omSnapshotManager; this.cacheLoader = cacheLoader; this.cacheSizeLimit = cacheSizeLimit; } @VisibleForTesting - ConcurrentHashMap> getDbMap() { + ConcurrentHashMap> getDbMap() { return dbMap; } @@ -85,7 +79,7 @@ public void invalidate(String key) throws IOException { LOG.warn("Key: '{}' does not exist in cache.", k); } else { try { - ((OmSnapshot) v.get()).close(); + v.get().close(); } catch (IOException e) { throw new IllegalStateException("Failed to close snapshot: " + key, e); } @@ -98,12 +92,12 @@ public void invalidate(String key) throws IOException { * Immediately invalidate all entries and close their DB instances in cache. */ public void invalidateAll() { - Iterator>> + Iterator>> it = dbMap.entrySet().iterator(); while (it.hasNext()) { - Map.Entry> entry = it.next(); - OmSnapshot omSnapshot = (OmSnapshot) entry.getValue().get(); + Map.Entry> entry = it.next(); + OmSnapshot omSnapshot = entry.getValue().get(); try { // TODO: If wrapped with SoftReference<>, omSnapshot could be null? omSnapshot.close(); @@ -125,21 +119,22 @@ public enum Reason { GARBAGE_COLLECTION_WRITE } - public ReferenceCounted get(String key) throws IOException { - return get(key, false); - } - /** * Get or load OmSnapshot. Shall be close()d after use. * TODO: [SNAPSHOT] Can add reason enum to param list later. * @param key snapshot table key * @return an OmSnapshot instance, or null on error */ - public ReferenceCounted get(String key, boolean skipActiveCheck) + public ReferenceCounted get(String key) throws IOException { + // Warn if actual cache size exceeds the soft limit already. + if (size() > cacheSizeLimit) { + LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", + size(), cacheSizeLimit); + } // Atomic operation to initialize the OmSnapshot instance (once) if the key // does not exist, and increment the reference count on the instance. - ReferenceCounted rcOmSnapshot = + ReferenceCounted rcOmSnapshot = dbMap.compute(key, (k, v) -> { if (v == null) { LOG.info("Loading snapshot. Table key: {}", k); @@ -173,17 +168,6 @@ public ReferenceCounted get(String key, boolea OMException.ResultCodes.FILE_NOT_FOUND); } - // If the snapshot is already loaded in cache, the check inside the loader - // above is ignored. But we would still want to reject all get()s except - // when called from SDT (and some) if the snapshot is not active anymore. - if (!skipActiveCheck && !omSnapshotManager.isSnapshotStatus(key, SNAPSHOT_ACTIVE)) { - // Ref count was incremented. Need to decrement on exception here. - rcOmSnapshot.decrementRefCount(); - throw new OMException("Unable to load snapshot. " + - "Snapshot with table key '" + key + "' is no longer active", - FILE_NOT_FOUND); - } - // Check if any entries can be cleaned up. 
// At this point, cache size might temporarily exceed cacheSizeLimit // even if there are entries that can be evicted, which is fine since it @@ -237,7 +221,7 @@ private synchronized void cleanup() { * TODO: [SNAPSHOT] Add new ozone debug CLI command to trigger this directly. */ private void cleanupInternal() { - for (Map.Entry> entry : dbMap.entrySet()) { + for (Map.Entry> entry : dbMap.entrySet()) { dbMap.compute(entry.getKey(), (k, v) -> { if (v == null) { throw new IllegalStateException("Key '" + k + "' does not exist in cache. The RocksDB " + @@ -252,7 +236,7 @@ private void cleanupInternal() { LOG.debug("Closing Snapshot {}. It is not being referenced anymore.", k); // Close the instance, which also closes its DB handle. try { - ((OmSnapshot) v.get()).close(); + v.get().close(); } catch (IOException ex) { throw new IllegalStateException("Error while closing snapshot DB", ex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 41e990097ecd..2a5da96f63f6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -114,7 +113,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS_DEFAULT; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.dropColumnFamilyHandle; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; @@ -150,7 +148,6 @@ public class SnapshotDiffManager implements AutoCloseable { private final ManagedRocksDB db; private final RocksDBCheckpointDiffer differ; private final OzoneManager ozoneManager; - private final SnapshotCache snapshotCache; private final CodecRegistry codecRegistry; private final ManagedColumnFamilyOptions familyOptions; // TODO: [SNAPSHOT] Use different wait time based of job status. 
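Two practical consequences of the SnapshotCache hunks above are worth spelling out: construction now needs only a loader and the soft size limit, and cleanupInternal() closes a snapshot DB handle only once its reference count reaches zero, so an OmSnapshot must not be used after its reference is released. Both are sketched below with illustrative names; the loader body, openSnapshotDb() and the type arguments are assumptions, and the key here is still the DB snapshot table key (HDDS-10250 further down switches it to the snapshot UUID):

    // Construction: loader plus soft limit, no back-reference to OmSnapshotManager.
    CacheLoader<String, OmSnapshot> cacheLoader = new CacheLoader<String, OmSnapshot>() {
      @Override
      public OmSnapshot load(@Nonnull String snapshotTableKey) throws IOException {
        return openSnapshotDb(snapshotTableKey);   // open the checkpoint DB (details elided)
      }
    };
    SnapshotCache snapshotCache = new SnapshotCache(cacheLoader, cacheSizeLimit);

    // Lifetime: hold the reference for the whole unit of work and do not let the
    // OmSnapshot escape it.
    OmSnapshot escaped;
    try (ReferenceCounted<OmSnapshot> rc = snapshotCache.get(dbKey)) {
      escaped = rc.get();
      // safe: the count is non-zero, so cleanup() will not close this DB handle
    }
    // unsafe: the count may be zero again and a later cleanup() may close the handle,
    // so 'escaped' should not be dereferenced past this point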
@@ -199,7 +196,6 @@ public class SnapshotDiffManager implements AutoCloseable { public SnapshotDiffManager(ManagedRocksDB db, RocksDBCheckpointDiffer differ, OzoneManager ozoneManager, - SnapshotCache snapshotCache, ColumnFamilyHandle snapDiffJobCfh, ColumnFamilyHandle snapDiffReportCfh, ManagedColumnFamilyOptions familyOptions, @@ -207,7 +203,6 @@ public SnapshotDiffManager(ManagedRocksDB db, this.db = db; this.differ = differ; this.ozoneManager = ozoneManager; - this.snapshotCache = snapshotCache; this.familyOptions = familyOptions; this.codecRegistry = codecRegistry; this.defaultWaitTime = ozoneManager.getConfiguration().getTimeDuration( @@ -832,8 +827,8 @@ void generateSnapshotDiffReport(final String jobKey, // job by RocksDBCheckpointDiffer#pruneOlderSnapshotsWithCompactionHistory. Path path = Paths.get(sstBackupDirForSnapDiffJobs + "/" + jobId); - ReferenceCounted rcFromSnapshot = null; - ReferenceCounted rcToSnapshot = null; + ReferenceCounted rcFromSnapshot = null; + ReferenceCounted rcToSnapshot = null; try { if (!areDiffJobAndSnapshotsActive(volumeName, bucketName, @@ -841,14 +836,15 @@ void generateSnapshotDiffReport(final String jobKey, return; } - String fsKey = getTableKey(volumeName, bucketName, fromSnapshotName); - String tsKey = getTableKey(volumeName, bucketName, toSnapshotName); + rcFromSnapshot = + ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, fromSnapshotName); + rcToSnapshot = + ozoneManager.getOmSnapshotManager() + .getActiveSnapshot(volumeName, bucketName, toSnapshotName); - rcFromSnapshot = snapshotCache.get(fsKey); - rcToSnapshot = snapshotCache.get(tsKey); - - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fsInfo = getSnapshotInfo(ozoneManager, volumeName, bucketName, fromSnapshotName); SnapshotInfo tsInfo = getSnapshotInfo(ozoneManager, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index e1ae8f57d15e..9cc79012dc17 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -66,7 +66,6 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.truncateFileName; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -176,9 +175,9 @@ public void testCloseOnEviction() throws IOException { // retrieve it and setup store mock OmSnapshotManager omSnapshotManager = om.getOmSnapshotManager(); - OmSnapshot firstSnapshot = (OmSnapshot) omSnapshotManager - .checkForSnapshot(first.getVolumeName(), - first.getBucketName(), getSnapshotPrefix(first.getName()), false).get(); + OmSnapshot firstSnapshot = omSnapshotManager + .getActiveSnapshot(first.getVolumeName(), first.getBucketName(), first.getName()) + .get(); DBStore firstSnapshotStore = mock(DBStore.class); 
HddsWhiteboxTestUtils.setInternalState( firstSnapshot.getMetadataManager(), "store", firstSnapshotStore); @@ -192,13 +191,12 @@ public void testCloseOnEviction() throws IOException { // read in second snapshot to evict first omSnapshotManager - .checkForSnapshot(second.getVolumeName(), - second.getBucketName(), getSnapshotPrefix(second.getName()), false); + .getActiveSnapshot(second.getVolumeName(), second.getBucketName(), second.getName()); // As a workaround, invalidate all cache entries in order to trigger // instances close in this test case, since JVM GC most likely would not // have triggered and closed the instances yet at this point. - omSnapshotManager.getSnapshotCache().invalidateAll(); + omSnapshotManager.invalidateCache(); // confirm store was closed verify(firstSnapshotStore, timeout(3000).times(1)).close(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java index a8b026af05b5..8ebf76cbf7a3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java @@ -35,7 +35,6 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -461,11 +460,12 @@ private Set getKeysFromSnapshot(String volume, String snapshot) throws IOException { SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volume, bucket, snapshot)); - try (ReferenceCounted - snapshotMetadataReader = om.getOmSnapshotManager() - .getSnapshotCache() - .get(snapshotInfo.getTableKey())) { - OmSnapshot omSnapshot = (OmSnapshot) snapshotMetadataReader.get(); + try (ReferenceCounted snapshotMetadataReader = + om.getOmSnapshotManager().getActiveSnapshot( + snapshotInfo.getVolumeName(), + snapshotInfo.getBucketName(), + snapshotInfo.getName())) { + OmSnapshot omSnapshot = snapshotMetadataReader.get(); return getKeysFromDb(omSnapshot.getMetadataManager(), volume, bucket); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index ff3db1abbe20..a912f549b3ce 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.UUID; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -31,7 +30,6 @@ import org.apache.hadoop.ozone.om.request.snapshot.TestOMSnapshotCreateRequest; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import 
org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.junit.jupiter.api.Test; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; @@ -44,7 +42,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -219,13 +216,12 @@ public void testKeyPurgeInSnapshot() throws Exception { .setName("snap1") .build(); - ReferenceCounted rcOmSnapshot = - ozoneManager.getOmSnapshotManager().checkForSnapshot( + ReferenceCounted rcOmSnapshot = + ozoneManager.getOmSnapshotManager().getSnapshot( fromSnapshotInfo.getVolumeName(), fromSnapshotInfo.getBucketName(), - getSnapshotPrefix(fromSnapshotInfo.getName()), - true); - OmSnapshot omSnapshot = (OmSnapshot) rcOmSnapshot.get(); + fromSnapshotInfo.getName()); + OmSnapshot omSnapshot = rcOmSnapshot.get(); // The keys should be present in the snapshot's deletedTable for (String deletedKey : deletedKeyNames) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 47b090f88d43..fde83d7b7697 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -42,7 +42,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.security.acl.OzoneNativeAuthorizer; @@ -168,7 +167,7 @@ public void setup() throws Exception { when(ozoneManager.getAccessAuthorizer()) .thenReturn(new OzoneNativeAuthorizer()); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); // Init OmMetadataReader to let the test pass diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index c01bb459b8f4..16cb9b6821a1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataReader; import org.apache.hadoop.ozone.om.IOmMetadataReader; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -86,7 +85,7 @@ public void setup() throws Exception { 
when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); auditLogger = mock(AuditLogger.class); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); // Init OmMetadataReader to let the test pass diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java index ca737d2bd254..03dc7862e35a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotDeleteRequest.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -110,8 +109,6 @@ public void setup() throws Exception { doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.getSnapshotCache()) - .thenReturn(mock(SnapshotCache.class)); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); volumeName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index a3b0dae46315..71882c3423e0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotCreateResponse; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -115,7 +114,7 @@ void setup(@TempDir File testDir) throws Exception { when(ozoneManager.isAdmin(any())).thenReturn(true); when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); - ReferenceCounted rcOmMetadataReader = + ReferenceCounted rcOmMetadataReader = mock(ReferenceCounted.class); when(ozoneManager.getOmMetadataReader()).thenReturn(rcOmMetadataReader); omSnapshotManager = new OmSnapshotManager(ozoneManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 77bf15ed76b1..d745a01e62e8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -52,7 +51,6 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; -import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.ozone.test.OzoneTestBase; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; @@ -82,7 +80,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -359,10 +356,9 @@ void testSnapshotDeepClean() throws Exception { keyDeletingService.resume(); - try (ReferenceCounted rcOmSnapshot = - om.getOmSnapshotManager().checkForSnapshot( - volumeName, bucketName, getSnapshotPrefix(snap3), true)) { - OmSnapshot snapshot3 = (OmSnapshot) rcOmSnapshot.get(); + try (ReferenceCounted rcOmSnapshot = + om.getOmSnapshotManager().getSnapshot(volumeName, bucketName, snap3)) { + OmSnapshot snapshot3 = rcOmSnapshot.get(); Table snap3deletedTable = snapshot3.getMetadataManager().getDeletedTable(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index cecd7a99af2b..2a70a1f09acc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -18,9 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import com.google.common.cache.CacheLoader; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OmSnapshot; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -34,14 +32,12 @@ import java.io.IOException; -import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.fail; import static 
org.mockito.Mockito.any; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -52,15 +48,11 @@ class TestSnapshotCache { private static final int CACHE_SIZE_LIMIT = 3; - private static OmSnapshotManager omSnapshotManager; private static CacheLoader cacheLoader; private SnapshotCache snapshotCache; @BeforeAll static void beforeAll() throws Exception { - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.isSnapshotStatus(any(), eq(SNAPSHOT_ACTIVE))) - .thenReturn(true); cacheLoader = mock(CacheLoader.class); // Create a difference mock OmSnapshot instance each time load() is called when(cacheLoader.load(any())).thenAnswer( @@ -81,8 +73,7 @@ static void beforeAll() throws Exception { @BeforeEach void setUp() { // Reset cache for each test case - snapshotCache = new SnapshotCache( - omSnapshotManager, cacheLoader, CACHE_SIZE_LIMIT); + snapshotCache = new SnapshotCache(cacheLoader, CACHE_SIZE_LIMIT); } @AfterEach @@ -95,8 +86,7 @@ void tearDown() { @DisplayName("01. get()") void testGet() throws IOException { final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertNotNull(omSnapshot.get()); assertInstanceOf(OmSnapshot.class, omSnapshot.get()); @@ -107,13 +97,11 @@ void testGet() throws IOException { @DisplayName("02. get() same entry twice yields one cache entry only") void testGetTwice() throws IOException { final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); - ReferenceCounted omSnapshot1again = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1again = snapshotCache.get(dbKey1); // Should be the same instance assertEquals(omSnapshot1, omSnapshot1again); assertEquals(omSnapshot1.get(), omSnapshot1again.get()); @@ -124,8 +112,7 @@ void testGetTwice() throws IOException { @DisplayName("03. release(String)") void testReleaseByDbKey() throws IOException { final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertNotNull(omSnapshot1.get()); assertEquals(1, snapshotCache.size()); @@ -139,12 +126,11 @@ void testReleaseByDbKey() throws IOException { @DisplayName("04. release(OmSnapshot)") void testReleaseByOmSnapshotInstance() throws IOException { final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); - snapshotCache.release((OmSnapshot) omSnapshot1.get()); + snapshotCache.release(omSnapshot1.get()); // Entry will not be immediately evicted assertEquals(1, snapshotCache.size()); } @@ -153,8 +139,7 @@ void testReleaseByOmSnapshotInstance() throws IOException { @DisplayName("05. invalidate()") void testInvalidate() throws IOException { final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertEquals(1, snapshotCache.size()); @@ -170,22 +155,19 @@ void testInvalidate() throws IOException { @DisplayName("06. 
invalidateAll()") void testInvalidateAll() throws IOException { final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = - snapshotCache.get(dbKey1); + ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); final String dbKey2 = "dbKey2"; - ReferenceCounted omSnapshot2 = - snapshotCache.get(dbKey2); + ReferenceCounted omSnapshot2 = snapshotCache.get(dbKey2); assertNotNull(omSnapshot2); assertEquals(2, snapshotCache.size()); // Should be difference omSnapshot instances assertNotEquals(omSnapshot1, omSnapshot2); final String dbKey3 = "dbKey3"; - ReferenceCounted omSnapshot3 = - snapshotCache.get(dbKey3); + ReferenceCounted omSnapshot3 = snapshotCache.get(dbKey3); assertNotNull(omSnapshot3); assertEquals(3, snapshotCache.size()); @@ -279,7 +261,7 @@ void testEviction2() throws IOException { void testEviction3WithClose() throws IOException { final String dbKey1 = "dbKey1"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); } @@ -289,11 +271,11 @@ void testEviction3WithClose() throws IOException { assertEquals(1, snapshotCache.size()); final String dbKey2 = "dbKey2"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(2, snapshotCache.size()); // Get dbKey2 entry a second time - try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { + try (ReferenceCounted rcOmSnapshot2 = snapshotCache.get(dbKey2)) { assertEquals(2L, rcOmSnapshot.getTotalRefCount()); assertEquals(2L, rcOmSnapshot2.getTotalRefCount()); assertEquals(2, snapshotCache.size()); @@ -304,7 +286,7 @@ void testEviction3WithClose() throws IOException { assertEquals(2, snapshotCache.size()); final String dbKey3 = "dbKey3"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(3, snapshotCache.size()); } @@ -312,7 +294,7 @@ void testEviction3WithClose() throws IOException { assertEquals(3, snapshotCache.size()); final String dbKey4 = "dbKey4"; - try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { + try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index b92546c2899b..a6461182f2f6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; -import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -217,9 +216,6 @@ public class 
TestSnapshotDiffManager { private OzoneManager ozoneManager; @Mock private OzoneConfiguration configuration; - - private SnapshotCache snapshotCache; - @Mock private Table snapshotInfoTable; @Mock @@ -382,23 +378,30 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getConfiguration()).thenReturn(configuration); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); - CacheLoader loader = - new CacheLoader() { - @Nonnull - @Override - public OmSnapshot load(@Nonnull String key) { - return getMockedOmSnapshot(key); - } - }; - omSnapshotManager = mock(OmSnapshotManager.class); when(omSnapshotManager.isSnapshotStatus( any(), any())).thenReturn(true); - snapshotCache = new SnapshotCache(omSnapshotManager, loader, 10); - + SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10); + + when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) + .thenAnswer(invocationOnMock -> { + String snapshotTableKey = SnapshotInfo.getTableKey(invocationOnMock.getArgument(0), + invocationOnMock.getArgument(1), invocationOnMock.getArgument(2)); + return snapshotCache.get(snapshotTableKey); + }); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, - snapshotCache, snapDiffJobTable, snapDiffReportTable, - columnFamilyOptions, codecRegistry); + snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); + } + + private CacheLoader mockCacheLoader() { + return new CacheLoader() { + @Nonnull + @Override + public OmSnapshot load(@Nonnull String key) { + return getMockedOmSnapshot(key); + } + }; } @AfterEach @@ -444,12 +447,12 @@ public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { eq(diffDir)) ).thenReturn(Lists.newArrayList(randomStrings)); - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap2); @@ -509,12 +512,12 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, .thenReturn(Collections.emptyList()); } - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); @@ -572,12 +575,12 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) 
any(DifferSnapshotInfo.class), anyString()); - ReferenceCounted rcFromSnapshot = - snapshotCache.get(snap1.toString()); - ReferenceCounted rcToSnapshot = - snapshotCache.get(snap2.toString()); - OmSnapshot fromSnapshot = (OmSnapshot) rcFromSnapshot.get(); - OmSnapshot toSnapshot = (OmSnapshot) rcToSnapshot.get(); + ReferenceCounted rcFromSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap1.toString()); + ReferenceCounted rcToSnapshot = + omSnapshotManager.getActiveSnapshot(VOLUME_NAME, BUCKET_NAME, snap2.toString()); + OmSnapshot fromSnapshot = rcFromSnapshot.get(); + OmSnapshot toSnapshot = rcToSnapshot.get(); SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); @@ -680,8 +683,7 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, getMockedTable(fromSnapshotTableMap, snapshotTableName); snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, - snapshotCache, snapDiffJobTable, snapDiffReportTable, - columnFamilyOptions, codecRegistry); + snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); SnapshotDiffManager spy = spy(snapshotDiffManager); doAnswer(invocation -> { From af25a480050c6036b8c2019b835ee0a56d9c40ac Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Fri, 9 Feb 2024 11:14:13 -0800 Subject: [PATCH 012/108] HDDS-10340. Skip ci on dashboard updates (#6203) Co-authored-by: Ritesh H Shukla --- dev-support/ci/selective_ci_checks.bats | 12 ++++++++++++ dev-support/ci/selective_ci_checks.sh | 2 ++ 2 files changed, 14 insertions(+) diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index 9fe1708c9137..a21a4c387b88 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -57,6 +57,18 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=false } +@test "dashboard only" { + run dev-support/ci/selective_ci_checks.sh 039dea9 + + assert_output -p 'basic-checks=["rat"]' + assert_output -p needs-build=false + assert_output -p needs-compile=false + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=false +} + @test "compose and robot" { run dev-support/ci/selective_ci_checks.sh b83039eef diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index 996bd382be36..3cfeaa4a6ece 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -233,6 +233,7 @@ function get_count_compose_files() { local ignore_array=( "^hadoop-ozone/dist/src/main/k8s" "^hadoop-ozone/dist/src/main/license" + "^hadoop-ozone/dist/src/main/compose/common/grafana/dashboards" "\.md$" ) filter_changed_files true @@ -494,6 +495,7 @@ function get_count_misc_files() { "\.md$" "findbugsExcludeFile.xml" "/NOTICE$" + "^hadoop-ozone/dist/src/main/compose/common/grafana/dashboards" ) local ignore_array=( "^.github/workflows/post-commit.yml" From 47ef84c59c668742757a3bc67df9dc606fef683d Mon Sep 17 00:00:00 2001 From: tanvipenumudy <46785609+tanvipenumudy@users.noreply.github.com> Date: Sat, 10 Feb 2024 01:48:49 +0530 Subject: [PATCH 013/108] HDDS-10318. 
Add OM client protocol metrics to Ozone - ListKey Metrics dashboard (#6183) Co-authored-by: tanvipenumudy --- .../dashboards/Ozone - ListKey Metrics.json | 111 ++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json index c5db476b69a2..7fc43b286cb1 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - ListKey Metrics.json @@ -19,6 +19,117 @@ "links": [], "liveNow": false, "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 19, + "panels": [], + "title": "OM Client Protocol Metrics", + "type": "row" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "om_client_protocol_concurrency", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{hostname}}, {{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Number of Requests Processed Concurrently", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { From 7c79246836cbf1995276d5c9b4f479652d670e46 Mon Sep 17 00:00:00 2001 From: ashishkumar50 <117710273+ashishkumar50@users.noreply.github.com> Date: Sat, 10 Feb 2024 06:40:32 +0530 Subject: [PATCH 014/108] HDDS-10256. Retry block allocation when SCM is in safe mode. 
(#6189) Co-authored-by: ashishk (cherry picked from commit 370b9d7c5cb85344cef4ab360776030baf395b27) --- ...ManagerProtocolClientSideTranslatorPB.java | 41 +++++++++++--- .../hadoop/ozone/om/TestScmSafeMode.java | 54 +++++++++++++++++++ 2 files changed, 89 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 5bb1362075ac..67d798732623 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -226,6 +226,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_S3_CALLER_CONTEXT_PREFIX; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_IN_SAFE_MODE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse; @@ -255,6 +256,10 @@ public final class OzoneManagerProtocolClientSideTranslatorPB = new ThreadLocal<>(); private boolean s3AuthCheck; + + public static final int BLOCK_ALLOCATION_RETRY_COUNT = 5; + public static final int BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS = 3000; + public OzoneManagerProtocolClientSideTranslatorPB(OmTransport omTransport, String clientId) { this.clientID = clientId; @@ -725,8 +730,7 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { .setCreateKeyRequest(req) .build(); - CreateKeyResponse keyResponse = - handleError(submitRequest(omRequest)).getCreateKeyResponse(); + CreateKeyResponse keyResponse = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getCreateKeyResponse(); return new OpenKeySession(keyResponse.getID(), OmKeyInfo.getFromProtobuf(keyResponse.getKeyInfo()), keyResponse.getOpenVersion()); @@ -771,8 +775,7 @@ public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientId, .setAllocateBlockRequest(req) .build(); - AllocateBlockResponse resp = handleError(submitRequest(omRequest)) - .getAllocateBlockResponse(); + AllocateBlockResponse resp = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getAllocateBlockResponse(); return OmKeyLocationInfo.getFromProtobuf(resp.getKeyLocation()); } @@ -2210,12 +2213,38 @@ public OpenKeySession createFile(OmKeyArgs args, OMRequest omRequest = createOMRequest(Type.CreateFile) .setCreateFileRequest(createFileRequest) .build(); - CreateFileResponse resp = - handleError(submitRequest(omRequest)).getCreateFileResponse(); + CreateFileResponse resp = handleSubmitRequestAndSCMSafeModeRetry(omRequest).getCreateFileResponse(); + return new OpenKeySession(resp.getID(), OmKeyInfo.getFromProtobuf(resp.getKeyInfo()), resp.getOpenVersion()); } + + @Nonnull + private OMResponse handleSubmitRequestAndSCMSafeModeRetry(OMRequest omRequest) throws IOException { + int retryCount = BLOCK_ALLOCATION_RETRY_COUNT; + while (true) { + try { + return handleError(submitRequest(omRequest)); + } catch (OMException e) { + if (e.getResult().equals(SCM_IN_SAFE_MODE) && retryCount > 0) { + System.err.println("SCM is in safe mode. 
Will retry in " + + BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS + "ms"); + retryCount--; + try { + Thread.sleep(BLOCK_ALLOCATION_RETRY_WAIT_TIME_MS); + continue; + } catch (InterruptedException ex) { + throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); + } + } else if (e.getResult().equals(SCM_IN_SAFE_MODE) && retryCount == 0) { + throw new OMException(e.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); + } + throw e; + } + } + } + @Override public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, boolean allowPartialPrefixes) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 2f06304bd1e3..d5c2c64c208a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -17,6 +17,12 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.SafeMode; +import org.apache.hadoop.fs.SafeModeAction; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.HddsConfigKeys; @@ -55,6 +61,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeoutException; import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; @@ -63,6 +70,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -335,4 +344,49 @@ public void testSCMSafeModeDisabled() throws Exception { cluster.restartStorageContainerManager(true); assertFalse(scm.isInSafeMode()); } + + @Test + public void testCreateRetryWhileSCMSafeMode() throws Exception { + // Test1: Test safe mode when there are no containers in system. 
+ cluster.stop(); + + try { + cluster = builder.build(); + } catch (IOException e) { + fail("Cluster startup failed."); + } + + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + OMMetrics omMetrics = cluster.getOzoneManager().getMetrics(); + long allocateBlockReqCount = omMetrics.getNumBlockAllocateFails(); + + try (FileSystem fs = FileSystem.get(conf)) { + assertTrue(((SafeMode)fs).setSafeMode(SafeModeAction.GET)); + + Thread t = new Thread(() -> { + try { + LOG.info("Wait for allocate block fails at least once"); + GenericTestUtils.waitFor(() -> omMetrics.getNumBlockAllocateFails() > allocateBlockReqCount, + 100, 10000); + + cluster.startHddsDatanodes(); + cluster.waitForClusterToBeReady(); + cluster.waitTobeOutOfSafeMode(); + } catch (InterruptedException | TimeoutException e) { + throw new RuntimeException(e); + } + }); + t.start(); + + final Path file = new Path("file"); + try (FSDataOutputStream outputStream = fs.create(file, true)) { + LOG.info("Successfully created a file"); + } + t.join(); + } + + assertFalse(cluster.getStorageContainerManager().isInSafeMode()); + } } From c35e99f04b9662061a8aac0429033959abf83707 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Sat, 10 Feb 2024 14:14:18 -0800 Subject: [PATCH 015/108] HDDS-10250. Use SnapshotId as key in SnapshotCache (#6139) --- .../hadoop/ozone/om/OmSnapshotManager.java | 42 +++++----- .../snapshot/OMSnapshotPurgeRequest.java | 2 +- .../ozone/om/snapshot/SnapshotCache.java | 67 +++++++--------- .../ozone/om/TestOmSnapshotManager.java | 7 ++ .../ozone/om/snapshot/TestSnapshotCache.java | 76 ++++++++----------- .../om/snapshot/TestSnapshotDiffManager.java | 33 +++++--- 6 files changed, 113 insertions(+), 114 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index e22d6b309739..eb37e399dfe6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -35,6 +35,7 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.UUID; import com.google.common.cache.RemovalListener; import org.apache.hadoop.hdds.StringUtils; @@ -244,7 +245,7 @@ public OmSnapshotManager(OzoneManager ozoneManager) { OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE, OZONE_OM_SNAPSHOT_CACHE_MAX_SIZE_DEFAULT); - CacheLoader loader = createCacheLoader(); + CacheLoader loader = createCacheLoader(); // TODO: [SNAPSHOT] Remove this if not going to make SnapshotCache impl // pluggable. 
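The cache loader declared above is re-keyed by snapshot UUID in the hunk that follows. For context, a minimal illustrative Guava CacheLoader keyed by UUID, with a plain String standing in for the snapshot handle, might look like:

    import com.google.common.cache.CacheLoader;
    import java.io.IOException;
    import java.util.UUID;

    // Toy loader for illustration only; the real loader opens an OmSnapshot for the id.
    final class ToySnapshotLoader extends CacheLoader<UUID, String> {
      @Override
      public String load(UUID snapshotId) throws IOException {
        if (snapshotId == null) {
          throw new IOException("No snapshot id supplied");
        }
        return "snapshot-" + snapshotId;   // stand-in for the loaded snapshot handle
      }
    }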
@@ -325,19 +326,25 @@ public boolean canDisableFsSnapshot(OMMetadataManager ommm) { return isSnapshotInfoTableEmpty; } - private CacheLoader createCacheLoader() { - return new CacheLoader() { + private CacheLoader createCacheLoader() { + return new CacheLoader() { @Nonnull @Override - public OmSnapshot load(@Nonnull String snapshotTableKey) - throws IOException { - // Check if the snapshot exists - final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); + public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { + String snapshotTableKey = ((OmMetadataManagerImpl) ozoneManager.getMetadataManager()) + .getSnapshotChainManager() + .getTableKey(snapshotId); + + // SnapshotChain maintains in-memory reverse mapping of snapshotId to snapshotName based on snapshotInfoTable. + // So it should not happen ideally. + // If it happens, then either snapshot has been purged in between or SnapshotChain is corrupted + // and missing some entries which needs investigation. + if (snapshotTableKey == null) { + throw new IOException("No snapshot exist with snapshotId: " + snapshotId); + } - // Block snapshot from loading when it is no longer active e.g. DELETED, - // unless this is called from SnapshotDeletingService. - checkSnapshotActive(snapshotInfo, true); + final SnapshotInfo snapshotInfo = getSnapshotInfo(snapshotTableKey); CacheValue cacheValue = ozoneManager.getMetadataManager() .getSnapshotInfoTable() @@ -417,9 +424,9 @@ public void invalidateCache() { /** * Immediately invalidate an entry. * - * @param key DB snapshot table key + * @param key SnapshotId. */ - public void invalidateCacheEntry(String key) throws IOException { + public void invalidateCacheEntry(UUID key) throws IOException { if (snapshotCache != null) { snapshotCache.invalidate(key); } @@ -663,17 +670,16 @@ private ReferenceCounted getSnapshot( return getSnapshot(snapshotTableKey, skipActiveCheck); } - private ReferenceCounted getSnapshot( - String snapshotTableKey, - boolean skipActiveCheck) throws IOException { - + private ReferenceCounted getSnapshot(String snapshotTableKey, boolean skipActiveCheck) + throws IOException { + SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, snapshotTableKey); // Block FS API reads when snapshot is not active. 
if (!skipActiveCheck) { - checkSnapshotActive(ozoneManager, snapshotTableKey); + checkSnapshotActive(snapshotInfo, false); } // retrieve the snapshot from the cache - return snapshotCache.get(snapshotTableKey); + return snapshotCache.get(snapshotInfo.getSnapshotId()); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 1533ceebe336..0fa9087e25e7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -91,7 +91,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn trxnLogIndex, updatedSnapInfos); updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); - ozoneManager.getOmSnapshotManager().invalidateCacheEntry(snapTableKey); + ozoneManager.getOmSnapshotManager().invalidateCacheEntry(fromSnapshot.getSnapshotId()); } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index e776968fcaf4..0b64d6d069b4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.util.Iterator; import java.util.Map; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; @@ -39,26 +40,25 @@ public class SnapshotCache { static final Logger LOG = LoggerFactory.getLogger(SnapshotCache.class); // Snapshot cache internal hash map. - // Key: DB snapshot table key + // Key: SnapshotId // Value: OmSnapshot instance, each holds a DB instance handle inside // TODO: [SNAPSHOT] Consider wrapping SoftReference<> around IOmMetadataReader - private final ConcurrentHashMap> dbMap; + private final ConcurrentHashMap> dbMap; + + private final CacheLoader cacheLoader; - private final CacheLoader cacheLoader; // Soft-limit of the total number of snapshot DB instances allowed to be // opened on the OM. private final int cacheSizeLimit; - public SnapshotCache( - CacheLoader cacheLoader, - int cacheSizeLimit) { + public SnapshotCache(CacheLoader cacheLoader, int cacheSizeLimit) { this.dbMap = new ConcurrentHashMap<>(); this.cacheLoader = cacheLoader; this.cacheSizeLimit = cacheSizeLimit; } @VisibleForTesting - ConcurrentHashMap> getDbMap() { + ConcurrentHashMap> getDbMap() { return dbMap; } @@ -71,17 +71,17 @@ public int size() { /** * Immediately invalidate an entry. 
- * @param key DB snapshot table key + * @param key SnapshotId */ - public void invalidate(String key) throws IOException { + public void invalidate(UUID key) throws IOException { dbMap.compute(key, (k, v) -> { if (v == null) { - LOG.warn("Key: '{}' does not exist in cache.", k); + LOG.warn("SnapshotId: '{}' does not exist in snapshot cache.", k); } else { try { v.get().close(); } catch (IOException e) { - throw new IllegalStateException("Failed to close snapshot: " + key, e); + throw new IllegalStateException("Failed to close snapshotId: " + key, e); } } return null; @@ -92,11 +92,10 @@ public void invalidate(String key) throws IOException { * Immediately invalidate all entries and close their DB instances in cache. */ public void invalidateAll() { - Iterator>> - it = dbMap.entrySet().iterator(); + Iterator>> it = dbMap.entrySet().iterator(); while (it.hasNext()) { - Map.Entry> entry = it.next(); + Map.Entry> entry = it.next(); OmSnapshot omSnapshot = entry.getValue().get(); try { // TODO: If wrapped with SoftReference<>, omSnapshot could be null? @@ -114,7 +113,7 @@ public void invalidateAll() { */ public enum Reason { FS_API_READ, - SNAPDIFF_READ, + SNAP_DIFF_READ, DEEP_CLEAN_WRITE, GARBAGE_COLLECTION_WRITE } @@ -122,11 +121,10 @@ public enum Reason { /** * Get or load OmSnapshot. Shall be close()d after use. * TODO: [SNAPSHOT] Can add reason enum to param list later. - * @param key snapshot table key + * @param key SnapshotId * @return an OmSnapshot instance, or null on error */ - public ReferenceCounted get(String key) - throws IOException { + public ReferenceCounted get(UUID key) throws IOException { // Warn if actual cache size exceeds the soft limit already. if (size() > cacheSizeLimit) { LOG.warn("Snapshot cache size ({}) exceeds configured soft-limit ({}).", @@ -137,9 +135,9 @@ public ReferenceCounted get(String key) ReferenceCounted rcOmSnapshot = dbMap.compute(key, (k, v) -> { if (v == null) { - LOG.info("Loading snapshot. Table key: {}", k); + LOG.info("Loading SnapshotId: '{}'", k); try { - v = new ReferenceCounted<>(cacheLoader.load(k), false, this); + v = new ReferenceCounted<>(cacheLoader.load(key), false, this); } catch (OMException omEx) { // Return null if the snapshot is no longer active if (!omEx.getResult().equals(FILE_NOT_FOUND)) { @@ -163,8 +161,7 @@ public ReferenceCounted get(String key) if (rcOmSnapshot == null) { // The only exception that would fall through the loader logic above // is OMException with FILE_NOT_FOUND. - throw new OMException("Snapshot table key '" + key + "' not found, " - + "or the snapshot is no longer active", + throw new OMException("SnapshotId: '" + key + "' not found, or the snapshot is no longer active.", OMException.ResultCodes.FILE_NOT_FOUND); } @@ -179,12 +176,12 @@ public ReferenceCounted get(String key) /** * Release the reference count on the OmSnapshot instance. - * @param key snapshot table key + * @param key SnapshotId */ - public void release(String key) { + public void release(UUID key) { dbMap.compute(key, (k, v) -> { if (v == null) { - throw new IllegalArgumentException("Key '" + key + "' does not exist in cache."); + throw new IllegalArgumentException("SnapshotId '" + key + "' does not exist in cache."); } else { v.decrementRefCount(); } @@ -196,15 +193,6 @@ public void release(String key) { cleanup(); } - /** - * Alternatively, can release with OmSnapshot instance directly. 
- * @param omSnapshot OmSnapshot - */ - public void release(OmSnapshot omSnapshot) { - final String snapshotTableKey = omSnapshot.getSnapshotTableKey(); - release(snapshotTableKey); - } - /** * Wrapper for cleanupInternal() that is synchronized to prevent multiple * threads from interleaving into the cleanup method. @@ -221,24 +209,23 @@ private synchronized void cleanup() { * TODO: [SNAPSHOT] Add new ozone debug CLI command to trigger this directly. */ private void cleanupInternal() { - for (Map.Entry> entry : dbMap.entrySet()) { + for (Map.Entry> entry : dbMap.entrySet()) { dbMap.compute(entry.getKey(), (k, v) -> { if (v == null) { - throw new IllegalStateException("Key '" + k + "' does not exist in cache. The RocksDB " + + throw new IllegalStateException("SnapshotId '" + k + "' does not exist in cache. The RocksDB " + "instance of the Snapshot may not be closed properly."); } if (v.getTotalRefCount() > 0) { - LOG.debug("Snapshot {} is still being referenced ({}), skipping its clean up", - k, v.getTotalRefCount()); + LOG.debug("SnapshotId {} is still being referenced ({}), skipping its clean up.", k, v.getTotalRefCount()); return v; } else { - LOG.debug("Closing Snapshot {}. It is not being referenced anymore.", k); + LOG.debug("Closing SnapshotId {}. It is not being referenced anymore.", k); // Close the instance, which also closes its DB handle. try { v.get().close(); } catch (IOException ex) { - throw new IllegalStateException("Error while closing snapshot DB", ex); + throw new IllegalStateException("Error while closing snapshot DB.", ex); } return null; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 9cc79012dc17..c865cb7814de 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -166,9 +166,16 @@ public void testCloseOnEviction() throws IOException { SnapshotInfo first = createSnapshotInfo(volumeName, bucketName); SnapshotInfo second = createSnapshotInfo(volumeName, bucketName); + first.setGlobalPreviousSnapshotId(null); + first.setPathPreviousSnapshotId(null); + second.setGlobalPreviousSnapshotId(first.getSnapshotId()); + second.setPathPreviousSnapshotId(first.getSnapshotId()); + when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first); when(snapshotInfoTable.get(second.getTableKey())).thenReturn(second); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(first); + ((OmMetadataManagerImpl) om.getMetadataManager()).getSnapshotChainManager().addSnapshot(second); // create the first snapshot checkpoint OmSnapshotManager.createOmSnapshotCheckpoint(om.getMetadataManager(), first); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index 2a70a1f09acc..21b795216def 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -31,6 +31,7 @@ import org.slf4j.event.Level; import java.io.IOException; +import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -48,7 +49,7 @@ class TestSnapshotCache { private static final int CACHE_SIZE_LIMIT = 3; - private static CacheLoader cacheLoader; + private static CacheLoader cacheLoader; private SnapshotCache snapshotCache; @BeforeAll @@ -59,8 +60,8 @@ static void beforeAll() throws Exception { (Answer) invocation -> { final OmSnapshot omSnapshot = mock(OmSnapshot.class); // Mock the snapshotTable return value for the lookup inside release() - final String dbKey = (String) invocation.getArguments()[0]; - when(omSnapshot.getSnapshotTableKey()).thenReturn(dbKey); + final UUID snapshotID = (UUID) invocation.getArguments()[0]; + when(omSnapshot.getSnapshotTableKey()).thenReturn(snapshotID.toString()); return omSnapshot; } @@ -83,9 +84,9 @@ void tearDown() { } @Test - @DisplayName("01. get()") + @DisplayName("get()") void testGet() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertNotNull(omSnapshot.get()); @@ -94,9 +95,9 @@ void testGet() throws IOException { } @Test - @DisplayName("02. get() same entry twice yields one cache entry only") + @DisplayName("get() same entry twice yields one cache entry only") void testGetTwice() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); @@ -109,9 +110,9 @@ void testGetTwice() throws IOException { } @Test - @DisplayName("03. release(String)") + @DisplayName("release(String)") void testReleaseByDbKey() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertNotNull(omSnapshot1.get()); @@ -123,22 +124,9 @@ void testReleaseByDbKey() throws IOException { } @Test - @DisplayName("04. release(OmSnapshot)") - void testReleaseByOmSnapshotInstance() throws IOException { - final String dbKey1 = "dbKey1"; - ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); - assertNotNull(omSnapshot1); - assertEquals(1, snapshotCache.size()); - - snapshotCache.release(omSnapshot1.get()); - // Entry will not be immediately evicted - assertEquals(1, snapshotCache.size()); - } - - @Test - @DisplayName("05. invalidate()") + @DisplayName("invalidate()") void testInvalidate() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); ReferenceCounted omSnapshot = snapshotCache.get(dbKey1); assertNotNull(omSnapshot); assertEquals(1, snapshotCache.size()); @@ -152,21 +140,21 @@ void testInvalidate() throws IOException { } @Test - @DisplayName("06. 
invalidateAll()") + @DisplayName("invalidateAll()") void testInvalidateAll() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); ReferenceCounted omSnapshot1 = snapshotCache.get(dbKey1); assertNotNull(omSnapshot1); assertEquals(1, snapshotCache.size()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); ReferenceCounted omSnapshot2 = snapshotCache.get(dbKey2); assertNotNull(omSnapshot2); assertEquals(2, snapshotCache.size()); // Should be difference omSnapshot instances assertNotEquals(omSnapshot1, omSnapshot2); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); ReferenceCounted omSnapshot3 = snapshotCache.get(dbKey3); assertNotNull(omSnapshot3); assertEquals(3, snapshotCache.size()); @@ -182,7 +170,7 @@ void testInvalidateAll() throws IOException { assertEquals(0, snapshotCache.size()); } - private void assertEntryExistence(String key, boolean shouldExist) { + private void assertEntryExistence(UUID key, boolean shouldExist) { if (shouldExist) { snapshotCache.getDbMap().computeIfAbsent(key, k -> { fail(k + " should not have been evicted"); @@ -197,28 +185,28 @@ private void assertEntryExistence(String key, boolean shouldExist) { } @Test - @DisplayName("07. Basic cache eviction") + @DisplayName("Basic cache eviction") void testEviction1() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); snapshotCache.release(dbKey1); assertEquals(1, snapshotCache.size()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); snapshotCache.release(dbKey2); assertEquals(2, snapshotCache.size()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); snapshotCache.release(dbKey3); assertEquals(3, snapshotCache.size()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1, dbKey2 and dbKey3 would have been evicted by the end of the last get() because // those were release()d. @@ -227,22 +215,22 @@ void testEviction1() throws IOException { } @Test - @DisplayName("08. Cache eviction while exceeding soft limit") + @DisplayName("Cache eviction while exceeding soft limit") void testEviction2() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); snapshotCache.get(dbKey1); assertEquals(1, snapshotCache.size()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); snapshotCache.get(dbKey2); assertEquals(2, snapshotCache.size()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); snapshotCache.get(dbKey3); assertEquals(3, snapshotCache.size()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); snapshotCache.get(dbKey4); // dbKey1 would not have been evicted because it is not release()d assertEquals(4, snapshotCache.size()); @@ -257,10 +245,10 @@ void testEviction2() throws IOException { } @Test - @DisplayName("09. 
Cache eviction with try-with-resources") + @DisplayName("Cache eviction with try-with-resources") void testEviction3WithClose() throws IOException { - final String dbKey1 = "dbKey1"; + final UUID dbKey1 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey1)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); @@ -270,7 +258,7 @@ void testEviction3WithClose() throws IOException { assertEquals(0L, snapshotCache.getDbMap().get(dbKey1).getTotalRefCount()); assertEquals(1, snapshotCache.size()); - final String dbKey2 = "dbKey2"; + final UUID dbKey2 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey2)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(2, snapshotCache.size()); @@ -285,7 +273,7 @@ void testEviction3WithClose() throws IOException { assertEquals(0L, snapshotCache.getDbMap().get(dbKey2).getTotalRefCount()); assertEquals(2, snapshotCache.size()); - final String dbKey3 = "dbKey3"; + final UUID dbKey3 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey3)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(3, snapshotCache.size()); @@ -293,7 +281,7 @@ void testEviction3WithClose() throws IOException { assertEquals(0L, snapshotCache.getDbMap().get(dbKey3).getTotalRefCount()); assertEquals(3, snapshotCache.size()); - final String dbKey4 = "dbKey4"; + final UUID dbKey4 = UUID.randomUUID(); try (ReferenceCounted rcOmSnapshot = snapshotCache.get(dbKey4)) { assertEquals(1L, rcOmSnapshot.getTotalRefCount()); assertEquals(1, snapshotCache.size()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index a6461182f2f6..3f9a1b3ae5bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -324,8 +324,8 @@ public void init() throws RocksDBException, IOException, ExecutionException { OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT, TimeUnit.MILLISECONDS)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_JOB_DEFAULT_WAIT_TIME_DEFAULT); - when(configuration - .getBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, + when(configuration. 
+ getBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT); when(configuration @@ -379,26 +379,25 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.isSnapshotStatus( - any(), any())).thenReturn(true); + when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10); when(omSnapshotManager.getActiveSnapshot(anyString(), anyString(), anyString())) .thenAnswer(invocationOnMock -> { - String snapshotTableKey = SnapshotInfo.getTableKey(invocationOnMock.getArgument(0), + SnapshotInfo snapInfo = SnapshotUtils.getSnapshotInfo(ozoneManager, invocationOnMock.getArgument(0), invocationOnMock.getArgument(1), invocationOnMock.getArgument(2)); - return snapshotCache.get(snapshotTableKey); + return snapshotCache.get(snapInfo.getSnapshotId()); }); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); } - private CacheLoader mockCacheLoader() { - return new CacheLoader() { + private CacheLoader mockCacheLoader() { + return new CacheLoader() { @Nonnull @Override - public OmSnapshot load(@Nonnull String key) { + public OmSnapshot load(@Nonnull UUID key) { return getMockedOmSnapshot(key); } }; @@ -416,9 +415,9 @@ public void tearDown() { IOUtils.closeQuietly(snapshotDiffManager); } - private OmSnapshot getMockedOmSnapshot(String snapshot) { + private OmSnapshot getMockedOmSnapshot(UUID snapshotId) { OmSnapshot omSnapshot = mock(OmSnapshot.class); - when(omSnapshot.getName()).thenReturn(snapshot); + when(omSnapshot.getName()).thenReturn(snapshotId.toString()); when(omSnapshot.getMetadataManager()).thenReturn(omMetadataManager); when(omMetadataManager.getStore()).thenReturn(dbStore); return omSnapshot; @@ -435,6 +434,10 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { public void testGetDeltaFilesWithDag(int numberOfFiles) throws IOException { UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); String diffDir = snapDiffDir.getAbsolutePath(); Set randomStrings = IntStream.range(0, numberOfFiles) @@ -504,6 +507,10 @@ public void testGetDeltaFilesWithFullDiff(int numberOfFiles, }); UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap2)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); if (!useFullDiff) { when(differ.getSSTDiffListWithFullPath( any(DifferSnapshotInfo.class), @@ -567,6 +574,10 @@ public void testGetDeltaFilesWithDifferThrowException(int numberOfFiles) }); 
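The SnapshotCache changes above key each entry by snapshot UUID and keep a per-entry reference count, evicting an entry only once its count drops to zero (see cleanupInternal). A stripped-down, illustrative version of that idea, which glosses over the close() handling the real cache performs, might be:

    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Toy reference-counted map keyed by UUID; invented for illustration, not the real SnapshotCache.
    final class TinyRefCache<V> {
      private static final class Entry<T> {
        private final T value;
        private long refs;
        Entry(T value) { this.value = value; }
      }

      private final ConcurrentHashMap<UUID, Entry<V>> map = new ConcurrentHashMap<>();

      V acquire(UUID id, Function<UUID, V> loader) {
        return map.compute(id, (k, v) -> {
          if (v == null) {
            v = new Entry<>(loader.apply(k));   // load on first use
          }
          v.refs++;                             // caller must call release(id) when done
          return v;
        }).value;
      }

      void release(UUID id) {
        map.compute(id, (k, v) -> {
          if (v == null || --v.refs > 0) {
            return v;                           // unknown or still referenced: keep as-is
          }
          return null;                          // last reference released: evict the entry
        });
      }
    }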
UUID snap1 = UUID.randomUUID(); UUID snap2 = UUID.randomUUID(); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap1.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap1.toString(), snap1)); + when(snapshotInfoTable.get(SnapshotInfo.getTableKey(VOLUME_NAME, BUCKET_NAME, snap2.toString()))) + .thenReturn(getSnapshotInfoInstance(VOLUME_NAME, BUCKET_NAME, snap2.toString(), snap2)); doThrow(new FileNotFoundException("File not found exception.")) .when(differ) From ffd8221eba66d07b4d5185f4dc3d01180037e325 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 12 Feb 2024 08:51:40 +0100 Subject: [PATCH 016/108] HDDS-10343. Remove dependency on jsr305 (#6208) --- hadoop-hdds/common/pom.xml | 6 ++++++ hadoop-hdds/hadoop-dependency-client/pom.xml | 4 ++++ hadoop-hdds/hadoop-dependency-server/pom.xml | 4 ++++ hadoop-hdds/interface-client/pom.xml | 5 ----- hadoop-ozone/csi/pom.xml | 14 ++++++++++++++ hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 1 - .../LICENSE-com.google.code.findbugs-jsr305.txt | 8 -------- hadoop-ozone/dist/src/main/license/jar-report.txt | 1 - hadoop-ozone/interface-client/pom.xml | 6 ++++++ .../S3MultipartUploadCompleteResponse.java | 4 +--- .../S3MultipartUploadCompleteResponseWithFSO.java | 3 +-- hadoop-ozone/s3gateway/pom.xml | 6 ++++++ pom.xml | 12 ++++++------ 13 files changed, 48 insertions(+), 26 deletions(-) delete mode 100644 hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 20dce15d4d1b..807ddf7f2765 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -181,6 +181,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> grpc-api ${io.grpc.version} compile + + + com.google.code.findbugs + jsr305 + + diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 85ae7bd4b201..d2a8372bdd17 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -99,6 +99,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.pjfanning jersey-json + + com.google.code.findbugs + jsr305 + com.sun.jersey jersey-core diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index abee8cc400fb..feaf3de5a11a 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -79,6 +79,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.pjfanning jersey-json + + com.google.code.findbugs + jsr305 + com.sun.jersey jersey-json diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 76fdfad111a9..2160f7c5edbf 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -51,11 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> javax.annotation javax.annotation-api - - com.google.code.findbugs - jsr305 - compile - diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index b28db73aed6e..d40a995ab920 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -46,6 +46,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.j2objc j2objc-annotations + + com.google.code.findbugs + jsr305 + @@ -62,6 +66,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + com.google.code.findbugs + jsr305 + 3.0.2 + provided + com.google.guava 
guava @@ -109,6 +119,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java + + com.google.code.findbugs + jsr305 + diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index e75cc7a91270..82ee0e4e180d 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -283,7 +283,6 @@ Apache License 2.0 com.github.stephenc.jcip:jcip-annotations com.google.android:annotations com.google.api.grpc:proto-google-common-protos - com.google.code.findbugs:jsr305 com.google.code.gson:gson com.google.errorprone:error_prone_annotations com.google.guava:failureaccess diff --git a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt b/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt deleted file mode 100644 index 842476092551..000000000000 --- a/hadoop-ozone/dist/src/main/license/bin/licenses/LICENSE-com.google.code.findbugs-jsr305.txt +++ /dev/null @@ -1,8 +0,0 @@ -The JSR-305 reference implementation (lib/jsr305.jar) is -distributed under the terms of the New BSD license: - - http://www.opensource.org/licenses/bsd-license.php - -See the JSR-305 home page for more information: - - http://code.google.com/p/jsr-305/ diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 51e308623662..ee0797cf2765 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -157,7 +157,6 @@ share/ozone/lib/jooq-meta.jar share/ozone/lib/jsch.jar share/ozone/lib/json-simple.jar share/ozone/lib/jsp-api.jar -share/ozone/lib/jsr305.jar share/ozone/lib/jsr311-api.jar share/ozone/lib/kerb-core.jar share/ozone/lib/kerby-asn1.jar diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 2c1e03ce3f86..b92de2f5bc1e 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -44,6 +44,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.grpc grpc-protobuf + + + com.google.code.findbugs + jsr305 + + io.grpc diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 3e390b0288ec..9fb843dcbe14 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -32,7 +32,6 @@ .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import javax.annotation.CheckForNull; import jakarta.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; @@ -58,7 +57,6 @@ public class S3MultipartUploadCompleteResponse extends OmKeyResponse { private List allKeyInfoToRemove; private OmBucketInfo omBucketInfo; - @SuppressWarnings("checkstyle:ParameterNumber") public S3MultipartUploadCompleteResponse( @Nonnull OMResponse omResponse, @Nonnull String multipartKey, @@ -66,7 +64,7 @@ public S3MultipartUploadCompleteResponse( @Nonnull OmKeyInfo omKeyInfo, @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo 
omBucketInfo) { + OmBucketInfo omBucketInfo) { super(omResponse, bucketLayout); this.allKeyInfoToRemove = allKeyInfoToRemove; this.multipartKey = multipartKey; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 29edfe382533..8774627ee66b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -27,7 +27,6 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import javax.annotation.CheckForNull; import jakarta.annotation.Nonnull; import java.io.IOException; import java.util.List; @@ -61,7 +60,7 @@ public S3MultipartUploadCompleteResponseWithFSO( @Nonnull OmKeyInfo omKeyInfo, @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, - @CheckForNull OmBucketInfo omBucketInfo, + OmBucketInfo omBucketInfo, @Nonnull long volumeId, @Nonnull long bucketId) { super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, bucketLayout, omBucketInfo); diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index f875047d04a2..18bbd906a0b1 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -99,6 +99,12 @@ io.grpc grpc-protobuf + + + com.google.code.findbugs + jsr305 + + io.grpc diff --git a/pom.xml b/pom.xml index 37dfb139e2cc..4ccf0bb34afd 100644 --- a/pom.xml +++ b/pom.xml @@ -199,7 +199,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.7.1 1.1.1 - 3.0.0 3.1.12 2.1.7 4.12.0 @@ -743,6 +742,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs com.google.guava guava ${guava.version} + + + com.google.code.findbugs + jsr305 + + com.google.code.gson @@ -1403,11 +1408,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs hadoop-cloud-storage ${hadoop.version} - - com.google.code.findbugs - jsr305 - ${findbugs.version} - jakarta.xml.bind jakarta.xml.bind-api From cd0069113ee5086f8df0981f4c758819dfc7298d Mon Sep 17 00:00:00 2001 From: Raju Balpande <146973984+raju-balpande@users.noreply.github.com> Date: Mon, 12 Feb 2024 21:56:57 +0530 Subject: [PATCH 017/108] HDDS-10218. 
Speed up TestSstFilteringService (#6196) --- .../ozone/om/TestSstFilteringService.java | 79 +++++++++++-------- 1 file changed, 47 insertions(+), 32 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java index 8ebf76cbf7a3..2654f4339ab4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java @@ -36,15 +36,19 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.ratis.util.ExitUtils; -import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.io.TempDir; import org.rocksdb.LiveFileMetaData; import java.io.File; import java.io.IOException; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -70,24 +74,21 @@ /** * Test SST Filtering Service. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(OrderAnnotation.class) public class TestSstFilteringService { - public static final String SST_FILE_EXTENSION = ".sst"; - @TempDir - private File folder; + private static final String SST_FILE_EXTENSION = ".sst"; private OzoneManagerProtocol writeClient; private OzoneManager om; private OzoneConfiguration conf; private KeyManager keyManager; + private short countTotalSnapshots = 0; @BeforeAll - public static void setup() { + void setup(@TempDir Path folder) throws Exception { ExitUtils.disableSystemExit(); - } - - @BeforeEach - void init() throws Exception { conf = new OzoneConfiguration(); - conf.set(OZONE_METADATA_DIRS, folder.getAbsolutePath()); + conf.set(OZONE_METADATA_DIRS, folder.toString()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, 100, @@ -101,7 +102,7 @@ void init() throws Exception { om = omTestManagers.getOzoneManager(); } - @AfterEach + @AfterAll public void cleanup() throws Exception { if (keyManager != null) { keyManager.stop(); @@ -132,6 +133,7 @@ public void cleanup() throws Exception { * @throws IOException - on Failure. 
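The changes above switch the test class to one shared fixture: @TestInstance(Lifecycle.PER_CLASS) lets @BeforeAll and @AfterAll be instance methods run once, and @TestMethodOrder with @Order sequences the tests against that shared state. A minimal, self-contained JUnit 5 sketch of the same lifecycle (the class and its state below are invented):

    import org.junit.jupiter.api.AfterAll;
    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.BeforeAll;
    import org.junit.jupiter.api.MethodOrderer.OrderAnnotation;
    import org.junit.jupiter.api.Order;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.TestInstance;
    import org.junit.jupiter.api.TestMethodOrder;

    @TestInstance(TestInstance.Lifecycle.PER_CLASS)
    @TestMethodOrder(OrderAnnotation.class)
    class SharedFixtureExampleTest {
      private StringBuilder fixture;     // stands in for an expensive cluster/OM setup

      @BeforeAll
      void setUp() {                     // may be non-static because of PER_CLASS
        fixture = new StringBuilder("ready");
      }

      @Test
      @Order(1)
      void firstTestUsesFixture() {
        Assertions.assertEquals("ready", fixture.toString());
        fixture.append("+1");
      }

      @Test
      @Order(2)
      void secondTestSeesEarlierState() {
        Assertions.assertTrue(fixture.toString().endsWith("+1"));
      }

      @AfterAll
      void tearDown() {                  // also allowed to be non-static under PER_CLASS
        fixture = null;
      }
    }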
*/ @Test + @Order(1) public void testIrrelevantSstFileDeletion() throws Exception { RDBStore activeDbStore = (RDBStore) om.getMetadataManager().getStore(); @@ -141,7 +143,8 @@ public void testIrrelevantSstFileDeletion() final int keyCount = 100; String volumeName = "vol1"; String bucketName1 = "buck1"; - createVolumeAndBucket(volumeName, bucketName1); + createVolume(volumeName); + addBucketToVolume(volumeName, bucketName1); createKeys(volumeName, bucketName1, keyCount / 2); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); @@ -179,13 +182,13 @@ public void testIrrelevantSstFileDeletion() assertThat(nonLevel0FilesCountAfterCompact).isGreaterThan(0); String bucketName2 = "buck2"; - createVolumeAndBucket(volumeName, bucketName2); + addBucketToVolume(volumeName, bucketName2); createKeys(volumeName, bucketName2, keyCount); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); List allFiles = activeDbStore.getDb().getSstFileList(); String snapshotName1 = "snapshot1"; - writeClient.createSnapshot(volumeName, bucketName2, snapshotName1); + createSnapshot(volumeName, bucketName2, snapshotName1); SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketName2, snapshotName1)); assertFalse(snapshotInfo.isSstFiltered()); @@ -222,7 +225,7 @@ public void testIrrelevantSstFileDeletion() try (BootstrapStateHandler.Lock lock = filteringService.getBootstrapStateLock().lock()) { count = filteringService.getSnapshotFilteredCount().get(); - writeClient.createSnapshot(volumeName, bucketName2, snapshotName2); + createSnapshot(volumeName, bucketName2, snapshotName2); assertThrows(TimeoutException.class, () -> waitForSnapshotsAtLeast(filteringService, count + 1)); @@ -239,14 +242,16 @@ public void testIrrelevantSstFileDeletion() } @Test + @Order(2) public void testActiveAndDeletedSnapshotCleanup() throws Exception { RDBStore activeDbStore = (RDBStore) om.getMetadataManager().getStore(); String volumeName = "volume1"; List bucketNames = Arrays.asList("bucket1", "bucket2"); + createVolume(volumeName); // Create 2 Buckets for (String bucketName : bucketNames) { - createVolumeAndBucket(volumeName, bucketName); + addBucketToVolume(volumeName, bucketName); } // Write 25 keys in each bucket, 2 sst files would be generated each for // keys in a single bucket @@ -264,8 +269,8 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { keyManager.getSnapshotSstFilteringService(); sstFilteringService.pause(); - writeClient.createSnapshot(volumeName, bucketNames.get(0), "snap1"); - writeClient.createSnapshot(volumeName, bucketNames.get(0), "snap2"); + createSnapshot(volumeName, bucketNames.get(0), "snap1"); + createSnapshot(volumeName, bucketNames.get(0), "snap2"); SnapshotInfo snapshot1Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap1")); @@ -283,15 +288,15 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { await(10_000, 1_000, () -> snap1Current.exists() && snap2Current.exists()); long snap1SstFileCountBeforeFilter = Arrays.stream(snapshot1Dir.listFiles()) - .filter(f -> f.getName().endsWith(".sst")).count(); + .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); long snap2SstFileCountBeforeFilter = Arrays.stream(snapshot2Dir.listFiles()) - .filter(f -> f.getName().endsWith(".sst")).count(); + .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); // delete snap1 writeClient.deleteSnapshot(volumeName, 
bucketNames.get(0), "snap1"); sstFilteringService.resume(); // Filtering service will only act on snap2 as it is an active snaphot - waitForSnapshotsAtLeast(sstFilteringService, 2); + waitForSnapshotsAtLeast(sstFilteringService, countTotalSnapshots); long snap1SstFileCountAfterFilter = Arrays.stream(snapshot1Dir.listFiles()) .filter(f -> f.getName().endsWith(SST_FILE_EXTENSION)).count(); long snap2SstFileCountAfterFilter = Arrays.stream(snapshot2Dir.listFiles()) @@ -299,10 +304,12 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { // one sst will be filtered in both active but not in deleted snapshot // as sstFiltering svc won't run on already deleted snapshots but will mark // it as filtered. - assertEquals(2, sstFilteringService.getSnapshotFilteredCount().get()); + assertEquals(countTotalSnapshots, sstFilteringService.getSnapshotFilteredCount().get()); assertEquals(snap1SstFileCountBeforeFilter, snap1SstFileCountAfterFilter); - assertEquals(snap2SstFileCountBeforeFilter - 1, - snap2SstFileCountAfterFilter); + // If method with order 1 is run .sst file from /vol1/buck1 and /vol1/buck2 will be deleted. + // As part of this method .sst file from /volume1/bucket2/ will be deleted. + // sstFiltering won't run on deleted snapshots in /volume1/bucket1. + assertThat(snap2SstFileCountBeforeFilter).isGreaterThan(snap2SstFileCountAfterFilter); } private void createKeys(String volumeName, @@ -315,8 +322,7 @@ private void createKeys(String volumeName, } } - private void createVolumeAndBucket(String volumeName, - String bucketName) + private void createVolume(String volumeName) throws IOException { OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), OmVolumeArgs.newBuilder() @@ -324,7 +330,10 @@ private void createVolumeAndBucket(String volumeName, .setAdminName("a") .setVolume(volumeName) .build()); + } + private void addBucketToVolume(String volumeName, String bucketName) + throws IOException { OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName) @@ -365,13 +374,15 @@ private void createKey(OzoneManagerProtocol managerProtocol, * snapshot bucket. 
*/ @Test + @Order(3) public void testSstFilteringService() throws Exception { RDBStore activeDbStore = (RDBStore) om.getMetadataManager().getStore(); String volumeName = "volume"; List bucketNames = Arrays.asList("bucket", "bucket1", "bucket2"); + createVolume(volumeName); for (String bucketName : bucketNames) { - createVolumeAndBucket(volumeName, bucketName); + addBucketToVolume(volumeName, bucketName); } int keyCount = 150; @@ -406,15 +417,14 @@ public void testSstFilteringService() throws Exception { List snapshotNames = Arrays.asList("snap", "snap-1", "snap-2"); for (int i = 0; i < 3; i++) { - writeClient.createSnapshot(volumeName, bucketNames.get(i), - snapshotNames.get(i)); + createSnapshot(volumeName, bucketNames.get(i), snapshotNames.get(i)); } SstFilteringService sstFilteringService = keyManager.getSnapshotSstFilteringService(); - waitForSnapshotsAtLeast(sstFilteringService, 3); - assertEquals(3, sstFilteringService.getSnapshotFilteredCount().get()); + waitForSnapshotsAtLeast(sstFilteringService, countTotalSnapshots); + assertEquals(countTotalSnapshots, sstFilteringService.getSnapshotFilteredCount().get()); Set keyInBucketAfterFilteringRun = getKeysFromSnapshot(volumeName, bucketNames.get(0), @@ -469,4 +479,9 @@ private Set getKeysFromSnapshot(String volume, return getKeysFromDb(omSnapshot.getMetadataManager(), volume, bucket); } } + + private void createSnapshot(String volumeName, String bucketName, String snapshotName) throws IOException { + writeClient.createSnapshot(volumeName, bucketName, snapshotName); + countTotalSnapshots++; + } } From 45c853c5eb56962cdbe5f4cf19aa38f80b2f544e Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 12 Feb 2024 19:40:35 +0100 Subject: [PATCH 018/108] HDDS-10325. Make BucketArgs immutable (#6205) --- .../hadoop/ozone/client/BucketArgs.java | 94 ++++++++----------- .../hadoop/ozone/client/rpc/RpcClient.java | 3 +- .../org/apache/hadoop/ozone/OzoneAcl.java | 16 +++- .../AbstractRootedOzoneFileSystemTest.java | 15 +-- .../rpc/TestOzoneRpcClientAbstract.java | 43 ++++----- 5 files changed, 74 insertions(+), 97 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 687605987a68..fee94c55f9ab 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -18,12 +18,15 @@ package org.apache.hadoop.ozone.client; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -37,80 +40,60 @@ public final class BucketArgs { /** * ACL Information. */ - private List acls; + private final ImmutableList acls; /** * Bucket Version flag. */ - private Boolean versioning; + private final boolean versioning; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private StorageType storageType; + private final StorageType storageType; /** * Custom key/value metadata. 
*/ - private Map metadata; + private final Map metadata; /** * Bucket encryption key name. */ - private String bucketEncryptionKey; - private DefaultReplicationConfig defaultReplicationConfig; + private final String bucketEncryptionKey; + private final DefaultReplicationConfig defaultReplicationConfig; private final String sourceVolume; private final String sourceBucket; - private long quotaInBytes; - private long quotaInNamespace; + private final long quotaInBytes; + private final long quotaInNamespace; - private String owner; + private final String owner; /** * Bucket Layout. */ - private BucketLayout bucketLayout = BucketLayout.DEFAULT; - - /** - * Private constructor, constructed via builder. - * @param versioning Bucket version flag. - * @param storageType Storage type to be used. - * @param acls list of ACLs. - * @param metadata map of bucket metadata - * @param bucketEncryptionKey bucket encryption key name - * @param sourceVolume - * @param sourceBucket - * @param quotaInBytes Bucket quota in bytes. - * @param quotaInNamespace Bucket quota in counts. - * @param bucketLayout bucket layout. - * @param owner owner of the bucket. - * @param defaultReplicationConfig default replication config. - */ - @SuppressWarnings("parameternumber") - private BucketArgs(Boolean versioning, StorageType storageType, - List acls, Map metadata, - String bucketEncryptionKey, String sourceVolume, String sourceBucket, - long quotaInBytes, long quotaInNamespace, BucketLayout bucketLayout, - String owner, DefaultReplicationConfig defaultReplicationConfig) { - this.acls = acls; - this.versioning = versioning; - this.storageType = storageType; - this.metadata = metadata; - this.bucketEncryptionKey = bucketEncryptionKey; - this.sourceVolume = sourceVolume; - this.sourceBucket = sourceBucket; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.bucketLayout = bucketLayout; - this.owner = owner; - this.defaultReplicationConfig = defaultReplicationConfig; + private final BucketLayout bucketLayout; + + private BucketArgs(Builder b) { + acls = b.acls == null ? ImmutableList.of() : ImmutableList.copyOf(b.acls); + versioning = b.versioning; + storageType = b.storageType; + metadata = b.metadata == null ? ImmutableMap.of() : ImmutableMap.copyOf(b.metadata); + bucketEncryptionKey = b.bucketEncryptionKey; + sourceVolume = b.sourceVolume; + sourceBucket = b.sourceBucket; + quotaInBytes = b.quotaInBytes; + quotaInNamespace = b.quotaInNamespace; + bucketLayout = b.bucketLayout; + owner = b.owner; + defaultReplicationConfig = b.defaultReplicationConfig; } /** * Returns true if bucket version is enabled, else false. * @return isVersionEnabled */ - public Boolean getVersioning() { + public boolean getVersioning() { return versioning; } @@ -206,7 +189,7 @@ public String getOwner() { * Builder for OmBucketInfo. 
*/ public static class Builder { - private Boolean versioning; + private boolean versioning; private StorageType storageType; private List acls; private Map metadata; @@ -220,12 +203,11 @@ public static class Builder { private DefaultReplicationConfig defaultReplicationConfig; public Builder() { - metadata = new HashMap<>(); quotaInBytes = OzoneConsts.QUOTA_RESET; quotaInNamespace = OzoneConsts.QUOTA_RESET; } - public BucketArgs.Builder setVersioning(Boolean versionFlag) { + public BucketArgs.Builder setVersioning(boolean versionFlag) { this.versioning = versionFlag; return this; } @@ -235,13 +217,19 @@ public BucketArgs.Builder setStorageType(StorageType storage) { return this; } - public BucketArgs.Builder setAcls(List listOfAcls) { - this.acls = listOfAcls; + public BucketArgs.Builder addAcl(OzoneAcl acl) { + if (acls == null) { + acls = new ArrayList<>(); + } + acls.add(acl); return this; } public BucketArgs.Builder addMetadata(String key, String value) { - this.metadata.put(key, value); + if (metadata == null) { + metadata = new HashMap<>(); + } + metadata.put(key, value); return this; } @@ -291,9 +279,7 @@ public BucketArgs.Builder setDefaultReplicationConfig( * @return instance of BucketArgs. */ public BucketArgs build() { - return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey, sourceVolume, sourceBucket, quotaInBytes, - quotaInNamespace, bucketLayout, owner, defaultReplicationConfig); + return new BucketArgs(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 8343b8740169..b3d853be6b59 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -627,8 +627,7 @@ public void createBucket( ugi.getShortUserName() : bucketArgs.getOwner(); } - Boolean isVersionEnabled = bucketArgs.getVersioning() == null ? - Boolean.FALSE : bucketArgs.getVersioning(); + boolean isVersionEnabled = bucketArgs.getVersioning(); StorageType storageType = bucketArgs.getStorageType() == null ? StorageType.DEFAULT : bucketArgs.getStorageType(); BucketLayout bucketLayout = bucketArgs.getBucketLayout(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 7ca0634949c0..2b79d24fd953 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -97,9 +97,23 @@ public OzoneAcl(ACLIdentityType type, String name, ACLType acl, * * @param type - Type * @param name - Name of user - * @param acls - Rights * @param scope - AclScope + * @param acls - Rights */ + public OzoneAcl(ACLIdentityType type, String name, AclScope scope, ACLType... acls) { + this(type, name, bitSetOf(acls), scope); + } + + private static BitSet bitSetOf(ACLType... 
acls) { + BitSet bits = new BitSet(); + if (acls != null && acls.length > 0) { + for (ACLType acl : acls) { + bits.set(acl.ordinal()); + } + } + return bits; + } + public OzoneAcl(ACLIdentityType type, String name, BitSet acls, AclScope scope) { Objects.requireNonNull(type); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 8ee82633d59a..03167ce11f36 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -90,7 +90,6 @@ import java.util.Arrays; import java.util.BitSet; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; @@ -122,7 +121,6 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; -import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.DELETE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.LIST; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -1223,20 +1221,11 @@ void testSharedTmpDir() throws IOException { } // set acls for shared tmp mount under the tmp volume - List objectAcls = new ArrayList<>(); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); - aclRights.clear(DELETE.ordinal()); - aclRights.set(LIST.ordinal()); - objectAcls.add(new OzoneAcl(ACLIdentityType.WORLD, "", - aclRights, ACCESS)); - objectAcls.add(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, - ACCESS)); // bucket acls have all access to admin and read+write+list access to world - BucketArgs bucketArgs = new BucketArgs.Builder() .setOwner("admin") - .setAcls(Collections.unmodifiableList(objectAcls)) + .addAcl(new OzoneAcl(ACLIdentityType.WORLD, "", ACCESS, READ, WRITE, LIST)) + .addAcl(new OzoneAcl(ACLIdentityType.USER, "admin", userRights, ACCESS)) .setQuotaInNamespace(1000) .setQuotaInBytes(Long.MAX_VALUE).build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 98bf65ad6b6f..28697379072e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -661,12 +661,10 @@ public void testCreateBucketWithAcls() String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", READ, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); 
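For reference, a minimal usage sketch (not part of any patch in this series) of the new varargs OzoneAcl constructor combined with BucketArgs.Builder.addAcl and addMetadata. The "store" handle, owner, volume and bucket names are illustrative assumptions; the statically imported ACL constants (USER, READ, WRITE, ACCESS) are the same ones the tests above use.

    // Illustrative sketch only: assumes an existing ObjectStore handle named "store".
    OzoneAcl ownerAcl = new OzoneAcl(USER, "admin", ACCESS, READ, WRITE);
    BucketArgs args = BucketArgs.newBuilder()
        .setOwner("admin")
        .addAcl(ownerAcl)                  // acl list is created lazily by the builder
        .addMetadata("purpose", "demo")    // metadata map is created lazily as well
        .build();                          // BucketArgs(Builder) copies both into immutable views
    store.getVolume("vol1").createBucket("bucket1", args);
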
assertEquals(bucketName, bucket.getName()); @@ -697,15 +695,13 @@ public void testCreateBucketWithAllArgument() String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); ReplicationConfig repConfig = new ECReplicationConfig(3, 2); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs.Builder builder = BucketArgs.newBuilder(); builder.setVersioning(true) .setStorageType(StorageType.SSD) - .setAcls(acls) + .addAcl(userAcl) .setDefaultReplicationConfig(new DefaultReplicationConfig(repConfig)); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); @@ -754,20 +750,16 @@ public void testRemoveBucketAcl() String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl); volume.createBucket(bucketName, builder.build()); OzoneBucket bucket = volume.getBucket(bucketName); - for (OzoneAcl acl : acls) { - assertTrue(bucket.removeAcl(acl)); - } + assertTrue(bucket.removeAcl(userAcl)); OzoneBucket newBucket = volume.getBucket(bucketName); assertEquals(bucketName, newBucket.getName()); - assertThat(bucket.getAcls()).doesNotContain(acls.get(0)); + assertThat(newBucket.getAcls()).doesNotContain(userAcl); } @Test @@ -777,14 +769,13 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() String bucketName = UUID.randomUUID().toString(); OzoneAcl userAcl = new OzoneAcl(USER, "test", ACLType.ALL, ACCESS); - List acls = new ArrayList<>(); - acls.add(userAcl); - acls.add(new OzoneAcl(USER, "test1", - ACLType.ALL, ACCESS)); + OzoneAcl acl2 = new OzoneAcl(USER, "test1", + ACLType.ALL, ACCESS); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - BucketArgs.Builder builder = BucketArgs.newBuilder(); - builder.setAcls(acls); + BucketArgs.Builder builder = BucketArgs.newBuilder() + .addAcl(userAcl) + .addAcl(acl2); volume.createBucket(bucketName, builder.build()); OzoneObj ozoneObj = OzoneObjInfo.Builder.newBuilder() .setBucketName(bucketName) @@ -793,13 +784,11 @@ public void testRemoveBucketAclUsingRpcClientRemoveAcl() .setResType(OzoneObj.ResourceType.BUCKET).build(); // Remove the 2nd acl added to the list. - boolean remove = store.removeAcl(ozoneObj, acls.get(1)); - assertTrue(remove); - assertThat(store.getAcl(ozoneObj)).doesNotContain(acls.get(1)); + assertTrue(store.removeAcl(ozoneObj, acl2)); + assertThat(store.getAcl(ozoneObj)).doesNotContain(acl2); - remove = store.removeAcl(ozoneObj, acls.get(0)); - assertTrue(remove); - assertThat(store.getAcl(ozoneObj)).doesNotContain(acls.get(0)); + assertTrue(store.removeAcl(ozoneObj, userAcl)); + assertThat(store.getAcl(ozoneObj)).doesNotContain(userAcl); } @Test From c289c670b980025a337f7469a796cc782e2511b0 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 12 Feb 2024 20:13:37 +0100 Subject: [PATCH 019/108] HDDS-10344. 
Schedule dependabot for weekend (#6209) --- .github/dependabot.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4b4c37f399b7..d591c3bc46c7 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -38,5 +38,7 @@ updates: directory: "/" schedule: interval: "weekly" + day: "saturday" + time: "07:00" # UTC pull-request-branch-name: separator: "-" From 68662a757aab76d2e06456a78b84224714160e52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 22:05:14 +0100 Subject: [PATCH 020/108] HDDS-10347. Bump jacoco to 0.8.11 (#6214) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 4ccf0bb34afd..a457a576e06c 100644 --- a/pom.xml +++ b/pom.xml @@ -146,7 +146,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 10.14.2.0 3.0.2 3.2.4 - 0.8.5 + 0.8.11 3.21.0-GA 1.2.2 2.3.3 From bacb184fd521907fd5ee8dc872203023d67453df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 05:18:56 +0100 Subject: [PATCH 021/108] HDDS-10356. Bump exec-maven-plugin to 3.1.1 (#6215) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index a457a576e06c..7b06872475e9 100644 --- a/pom.xml +++ b/pom.xml @@ -274,7 +274,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.6.1 4.2.2 0.29.0 - 1.3.1 + 3.1.1 2.3.0 1.0-beta-1 1.0-alpha-11 From 7370676dcf476a01094f7efa97aba98780a5073f Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Tue, 13 Feb 2024 12:23:07 +0300 Subject: [PATCH 022/108] HDDS-9680. Use md5 hash of multipart object part's content as ETag (#5668) --- .../org/apache/hadoop/ozone/OzoneConsts.java | 5 + .../content/feature/S3-Tenant-Commands.md | 2 +- .../OzoneMultipartUploadPartListParts.java | 9 +- .../hadoop/ozone/client/rpc/RpcClient.java | 3 +- .../OmMultipartCommitUploadPartInfo.java | 11 +- .../OmMultipartUploadCompleteList.java | 5 +- .../helpers/OmMultipartUploadListParts.java | 3 +- .../hadoop/ozone/om/helpers/OmPartInfo.java | 11 +- ...ManagerProtocolClientSideTranslatorPB.java | 3 +- .../main/smoketest/s3/MultipartUpload.robot | 33 ++-- .../TestOzoneFSWithObjectStoreCreate.java | 9 +- .../hadoop/ozone/TestMultipartObjectGet.java | 4 +- .../client/rpc/TestOzoneAtRestEncryption.java | 18 +- ...TestOzoneClientMultipartUploadWithFSO.java | 173 +++++++++++------- .../rpc/TestOzoneRpcClientAbstract.java | 128 ++++++++----- .../rpc/TestOzoneRpcClientWithRatis.java | 10 +- .../ozone/om/TestObjectStoreWithLegacyFS.java | 11 +- .../TestOzoneManagerHAWithStoppedNodes.java | 10 +- .../src/main/proto/OmClientProtocol.proto | 5 +- .../hadoop/ozone/om/KeyManagerImpl.java | 6 +- .../S3MultipartUploadCommitPartRequest.java | 10 +- .../S3MultipartUploadCompleteRequest.java | 85 +++++++-- .../ozone/om/request/OMRequestTestUtils.java | 38 +++- .../TestS3MultipartUploadCompleteRequest.java | 16 +- .../s3/multipart/TestS3MultipartResponse.java | 2 +- .../TestMultipartUploadCleanupService.java | 4 + .../om/service/TestOpenKeyCleanupService.java | 4 + .../ozone/s3/commontypes/KeyMetadata.java | 4 +- .../CompleteMultipartUploadRequest.java | 10 +- .../CompleteMultipartUploadResponse.java | 3 +- .../ozone/s3/endpoint/CopyObjectResponse.java | 3 +- .../ozone/s3/endpoint/CopyPartResult.java | 3 +- .../ozone/s3/endpoint/EndpointBase.java | 3 +- 
.../ozone/s3/endpoint/ListPartsResponse.java | 3 +- .../ozone/s3/endpoint/ObjectEndpoint.java | 13 +- .../s3/endpoint/ObjectEndpointStreaming.java | 21 +-- .../hadoop/ozone/client/OzoneBucketStub.java | 32 +++- .../client/OzoneDataStreamOutputStub.java | 4 +- .../ozone/client/OzoneOutputStreamStub.java | 5 +- ...eteMultipartUploadRequestUnmarshaller.java | 4 +- .../ozone/s3/endpoint/TestListParts.java | 6 +- .../endpoint/TestMultipartUploadComplete.java | 6 +- .../endpoint/TestMultipartUploadWithCopy.java | 13 +- .../ozone/s3/endpoint/TestPartUpload.java | 10 +- .../s3/endpoint/TestPartUploadWithStream.java | 11 +- 45 files changed, 527 insertions(+), 245 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 653af11ce8a8..f3c08b252b1f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -587,4 +587,9 @@ private OzoneConsts() { */ public static final String COMPACTION_LOG_TABLE = "compactionLogTable"; + + /** + * S3G multipart upload request's ETag header key. + */ + public static final String ETAG = "ETag"; } diff --git a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md index f9ea5f608461..23c015515035 100644 --- a/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md +++ b/hadoop-hdds/docs/content/feature/S3-Tenant-Commands.md @@ -432,7 +432,7 @@ bash-4.2$ aws s3api --endpoint-url http://s3g:9878 list-objects --bucket bucket- { "Key": "file1", "LastModified": "2022-02-16T00:10:00.000Z", - "ETag": "2022-02-16T00:10:00.000Z", + "ETag": "e99f93dedfe22e9a133dc3c634f14634", "Size": 3811, "StorageClass": "STANDARD" } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java index c1902cdb60d2..67f8edf31408 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java @@ -104,12 +104,15 @@ public static class PartInfo { private String partName; private long modificationTime; private long size; + private String eTag; - public PartInfo(int number, String name, long time, long size) { + public PartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -127,5 +130,9 @@ public long getModificationTime() { public long getSize() { return size; } + + public String getETag() { + return eTag; + } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index b3d853be6b59..94d6ae9769dc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1952,7 +1952,8 @@ public OzoneMultipartUploadPartListParts listParts(String volumeName, ozoneMultipartUploadPartListParts.addPart( new OzoneMultipartUploadPartListParts.PartInfo( omPartInfo.getPartNumber(), 
omPartInfo.getPartName(), - omPartInfo.getModificationTime(), omPartInfo.getSize())); + omPartInfo.getModificationTime(), omPartInfo.getSize(), + omPartInfo.getETag())); } return ozoneMultipartUploadPartListParts; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java index 646cb421e434..bbf1a1bdae53 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartCommitUploadPartInfo.java @@ -24,8 +24,15 @@ public class OmMultipartCommitUploadPartInfo { private final String partName; - public OmMultipartCommitUploadPartInfo(String name) { - this.partName = name; + private final String eTag; + + public OmMultipartCommitUploadPartInfo(String partName, String eTag) { + this.partName = partName; + this.eTag = eTag; + } + + public String getETag() { + return eTag; } public String getPartName() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java index 63e6353c1850..ff39661d01b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadCompleteList.java @@ -56,8 +56,9 @@ public Map getMultipartMap() { */ public List getPartsList() { List partList = new ArrayList<>(); - multipartMap.forEach((partNumber, partName) -> partList.add(Part - .newBuilder().setPartName(partName).setPartNumber(partNumber).build())); + multipartMap.forEach((partNumber, eTag) -> partList.add(Part + // set partName equal to eTag for back compatibility (partName is a required property) + .newBuilder().setPartName(eTag).setETag(eTag).setPartNumber(partNumber).build())); return partList; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java index fbf519c22682..0ba0e26acda2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartUploadListParts.java @@ -79,6 +79,7 @@ public void addPartList(List partInfos) { public void addProtoPartList(List partInfos) { partInfos.forEach(partInfo -> partInfoList.add(new OmPartInfo( partInfo.getPartNumber(), partInfo.getPartName(), - partInfo.getModificationTime(), partInfo.getSize()))); + partInfo.getModificationTime(), partInfo.getSize(), + partInfo.getETag()))); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index 2d753a5caa5a..e908c5a025f1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -28,12 +28,15 @@ public class OmPartInfo { private String partName; private long modificationTime; private long size; + private String eTag; - public OmPartInfo(int number, String name, long time, 
long size) { + public OmPartInfo(int number, String name, long time, long size, + String eTag) { this.partNumber = number; this.partName = name; this.modificationTime = time; this.size = size; + this.eTag = eTag; } public int getPartNumber() { @@ -52,9 +55,13 @@ public long getSize() { return size; } + public String getETag() { + return eTag; + } + public PartInfo getProto() { return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) .setModificationTime(modificationTime) - .setSize(size).build(); + .setSize(size).setETag(eTag).build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 67d798732623..586410275857 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1632,7 +1632,8 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( .getCommitMultiPartUploadResponse(); OmMultipartCommitUploadPartInfo info = new - OmMultipartCommitUploadPartInfo(response.getPartName()); + OmMultipartCommitUploadPartInfo(response.getPartName(), + response.getETag()); return info; } diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index 04cce8fefcd4..3a6ae0e45d45 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -94,21 +94,28 @@ Test Multipart Upload Complete Should contain ${result} UploadId #upload parts - Run Keyword Create Random file 5 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} - ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag - - Execute echo "Part2" > /tmp/part2 - ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} - ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 - Should contain ${result} ETag + Run Keyword Create Random file 5 + ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 --body /tmp/part1 --upload-id ${uploadID} + ${eTag1} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should contain ${result} ETag + ${part1Md5Sum} = Execute md5sum /tmp/part1 | awk '{print $1}' + Should Be Equal As Strings ${eTag1} ${part1Md5Sum} + + Execute echo "Part2" > /tmp/part2 + ${result} = Execute AWSS3APICli upload-part --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 --body /tmp/part2 --upload-id ${uploadID} + ${eTag2} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should contain ${result} ETag + ${part2Md5Sum} = Execute md5sum /tmp/part2 | awk '{print $1}' + Should Be Equal As Strings ${eTag2} ${part2Md5Sum} #complete multipart upload - ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' - Should contain ${result} ${BUCKET} - Should contain ${result} 
${PREFIX}/multipartKey1 - Should contain ${result} ETag + ${result} = Execute AWSS3APICli complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 --multipart-upload 'Parts=[{ETag=${eTag1},PartNumber=1},{ETag=${eTag2},PartNumber=2}]' + Should contain ${result} ${BUCKET} + Should contain ${result} ${PREFIX}/multipartKey1 + ${resultETag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + ${expectedResultETag} = Execute echo -n ${eTag1}${eTag2} | md5sum | awk '{print $1}' + Should contain ${result} ETag + Should Be Equal As Strings ${resultETag} "${expectedResultETag}-2" #read file and check the key ${result} = Execute AWSS3ApiCli get-object --bucket ${BUCKET} --key ${PREFIX}/multipartKey1 /tmp/${PREFIX}-multipartKey1.result diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 6dccd604208f..a41dcd80acdc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; @@ -48,6 +49,7 @@ import java.io.FileNotFoundException; import java.net.URI; +import java.security.MessageDigest; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -57,6 +59,8 @@ import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.assertj.core.api.Assertions.assertThat; @@ -293,10 +297,13 @@ public void testMPUFailDuetoDirectoryCreationBeforeComplete() // This should succeed, as we check during creation of part or during // complete MPU. + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(MD5_HASH) + .digest(b)).toLowerCase()); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); // Should fail, as we have directory with same name. 
OMException ex = assertThrows(OMException.class, () -> ozoneBucket.completeMultipartUpload(keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 852f351ee25a..cb49f3b320a0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -141,11 +141,11 @@ private CompleteMultipartUploadRequest.Part uploadPart(String uploadID, Response response = REST.put(BUCKET, KEY, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); CompleteMultipartUploadRequest.Part part = new CompleteMultipartUploadRequest.Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 919654d82a9b..0b0149b4d9c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -22,6 +22,7 @@ import java.net.URI; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.time.Instant; import java.util.ArrayList; @@ -34,6 +35,7 @@ import java.util.UUID; import com.google.common.cache.Cache; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; @@ -120,6 +122,7 @@ class TestOzoneAtRestEncryption { private static final int DEFAULT_CRYPTO_BUFFER_SIZE = 8 * 1024; // 8KB // (this is the default Crypto Buffer size as determined by the config // hadoop.security.crypto.buffer.size) + private static MessageDigest eTagProvider; @BeforeAll static void init() throws Exception { @@ -169,6 +172,7 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } @AfterAll @@ -631,14 +635,17 @@ private String uploadStreamPart(OzoneBucket bucket, String keyName, ByteBuffer dataBuffer = ByteBuffer.wrap(data); multipartStreamKey.write(dataBuffer, 0, length); + multipartStreamKey.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); multipartStreamKey.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = multipartStreamKey.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private String uploadPart(OzoneBucket bucket, String keyName, @@ -646,14 +653,17 @@ 
private String uploadPart(OzoneBucket bucket, String keyName, OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); - assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + return omMultipartCommitUploadPartInfo.getETag(); } private void completeMultipartUpload(OzoneBucket bucket, String keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index 268a192640c6..1e75a4d10a86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -17,8 +17,14 @@ package org.apache.hadoop.ozone.client.rpc; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; + +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -98,6 +104,7 @@ public class TestOzoneClientMultipartUploadWithFSO { private static ObjectStore store = null; private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; + private static MessageDigest eTagProvider; private String volumeName; private String bucketName; @@ -118,6 +125,7 @@ public static void init() throws Exception { conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); } /** @@ -188,6 +196,9 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(keyName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); // Call initiate multipart upload for the same key again, this should @@ -195,6 +206,9 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws multipartInfo = bucket.initiateMultipartUpload(keyName); assertNotNull(multipartInfo); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(keyName, multipartInfo.getKeyName()); assertNotEquals(multipartInfo.getUploadID(), uploadID); assertNotNull(multipartInfo.getUploadID()); } @@ -208,13 +222,14 @@ public void testUploadPartWithNoOverride() throws IOException { OzoneOutputStream ozoneOutputStream = 
bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @Test @@ -224,12 +239,12 @@ public void testUploadPartOverrideWithRatis() throws Exception { ReplicationType.RATIS, THREE); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - sampleData.getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, sampleData.getBytes(UTF_8)); //Overwrite the part by creating part key with same part number. - String partNameNew = uploadPart(bucket, keyName, uploadID, partNumber, - "name".getBytes(UTF_8)); + Pair partNameAndETagNew = uploadPart(bucket, keyName, + uploadID, partNumber, "name".getBytes(UTF_8)); // PartName should be same from old part Name. // AWS S3 for same content generates same partName during upload part. @@ -239,7 +254,10 @@ public void testUploadPartOverrideWithRatis() throws Exception { // So, when a part is override partNames will still be same irrespective // of content in ozone s3. This will make S3 Mpu completeMPU pass when // comparing part names and large file uploads work using aws cp. - assertEquals(partName, partNameNew, "Part names should be same"); + assertEquals(partNameAndETag.getKey(), partNameAndETagNew.getKey()); + + // ETags are not equal due to content differences + assertNotEquals(partNameAndETag.getValue(), partNameAndETagNew.getValue()); // old part bytes written needs discard and have only // new part bytes in quota for this bucket @@ -249,7 +267,8 @@ public void testUploadPartOverrideWithRatis() throws Exception { } @Test - public void testUploadTwiceWithEC() throws IOException { + public void testUploadTwiceWithEC() + throws IOException, NoSuchAlgorithmException { bucketName = UUID.randomUUID().toString(); bucket = getOzoneECBucket(bucketName); @@ -260,12 +279,12 @@ public void testUploadTwiceWithEC() throws IOException { String uploadID = multipartInfo.getUploadID(); int partNumber = 1; - String partName = uploadPart(bucket, keyName, uploadID, partNumber, - data); - - Map partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + partNumber, data); + + Map eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); long replicatedSize = QuotaUtil.getReplicatedSize(data.length, bucket.getReplicationConfig()); @@ -276,12 +295,12 @@ public void testUploadTwiceWithEC() throws IOException { multipartInfo = bucket.initiateMultipartUpload(keyName); uploadID = multipartInfo.getUploadID(); - partName = uploadPart(bucket, keyName, uploadID, partNumber, + partNameAndETag = uploadPart(bucket, keyName, uploadID, partNumber, data); - partsMap = new HashMap<>(); - partsMap.put(partNumber, partName); - bucket.completeMultipartUpload(keyName, uploadID, partsMap); + eTagsMap = new HashMap<>(); + eTagsMap.put(partNumber, partNameAndETag.getValue()); + bucket.completeMultipartUpload(keyName, uploadID, eTagsMap); // used sized should remain same, 
overwrite previous upload assertEquals(volume.getBucket(bucketName).getUsedBytes(), @@ -289,7 +308,8 @@ public void testUploadTwiceWithEC() throws IOException { } @Test - public void testUploadAbortWithEC() throws IOException { + public void testUploadAbortWithEC() + throws IOException, NoSuchAlgorithmException { byte[] data = generateData(81920, (byte) 97); bucketName = UUID.randomUUID().toString(); @@ -332,19 +352,19 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { ONE); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); + eTagsMap.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, - "data".getBytes(UTF_8)); - partsMap.put(2, partName); + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, + "data".getBytes(UTF_8)); + eTagsMap.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -355,22 +375,24 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() byte[] data = generateData(10000000, (byte) 97); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMap = new TreeMap<>(); - // Upload part 1 and add it to the partsMap for completing the upload. - String partName1 = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName1); + // Upload part 1 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, data); + eTagsMap.put(1, partNameAndETag1.getValue()); - // Upload part 2 and add it to the partsMap for completing the upload. - String partName2 = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName2); + // Upload part 2 and add it to the eTagsMap for completing the upload. + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, data); + eTagsMap.put(2, partNameAndETag2.getValue()); - // Upload part 3 but do not add it to the partsMap. + // Upload part 3 but do not add it to the eTagsMap. uploadPart(bucket, keyName, uploadID, 3, data); - completeMultipartUpload(bucket, keyName, uploadID, partsMap); + completeMultipartUpload(bucket, keyName, uploadID, eTagsMap); - // Check the bucket size. Since part number 3 was not added to the partsMap, + // Check the bucket size. Since part number 3 was not added to the eTagsMap, // the unused part size should be discarded from the bucket size, // 30000000 - 10000000 = 20000000 long bucketSize = volume.getBucket(bucketName).getUsedBytes(); @@ -457,6 +479,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -465,10 +490,13 @@ public void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. 
ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, uploadID); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -541,12 +569,13 @@ public void testAbortUploadSuccessWithParts() throws Exception { String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); + Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, + 1, "data".getBytes(UTF_8)); OMMetadataManager metadataMgr = cluster.getOzoneManager().getMetadataManager(); - String multipartKey = verifyUploadedPart(uploadID, partName, metadataMgr); + String multipartKey = verifyUploadedPart(uploadID, partNameAndETag.getKey(), + metadataMgr); bucket.abortMultipartUpload(keyName, uploadID); @@ -572,17 +601,17 @@ public void testListMultipartUploadParts() throws Exception { Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -640,7 +669,6 @@ private void verifyPartNamesInDB(Map partsMap, listPartNames.remove(partKeyName); } - assertThat(listPartNames).withFailMessage("Wrong partKeyName format in DB!").isEmpty(); } @@ -662,17 +690,17 @@ public void testListMultipartUploadPartsWithContinuation() Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = 
uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -734,8 +762,8 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() bucket.listParts(keyName, uploadID, 100, 2); // Should return empty - assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); + assertEquals( RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), ozoneMultipartUploadPartListParts.getReplicationConfig()); @@ -870,27 +898,37 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); + assertEquals(volumeName, multipartInfo.getVolumeName()); + assertEquals(bucketName, multipartInfo.getBucketName()); + assertEquals(kName, multipartInfo.getKeyName()); assertNotNull(multipartInfo.getUploadID()); return uploadID; } - private String uploadPart(OzoneBucket oBucket, String kName, String - uploadID, int partNumber, byte[] data) throws IOException { + private Pair uploadPart(OzoneBucket oBucket, String kName, + String uploadID, int partNumber, + byte[] data) + throws IOException, NoSuchAlgorithmException { OzoneOutputStream ozoneOutputStream = oBucket.createMultipartKey(kName, data.length, partNumber, uploadID); - ozoneOutputStream.write(data, 0, - data.length); + ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); + assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } private void completeMultipartUpload(OzoneBucket oBucket, String kName, @@ -899,6 +937,11 @@ private void completeMultipartUpload(OzoneBucket oBucket, String kName, .completeMultipartUpload(kName, uploadID, partsMap); assertNotNull(omMultipartUploadCompleteInfo); + assertEquals(omMultipartUploadCompleteInfo.getBucket(), oBucket + .getName()); + assertEquals(omMultipartUploadCompleteInfo.getVolume(), oBucket + .getVolumeName()); + assertEquals(omMultipartUploadCompleteInfo.getKey(), kName); assertNotNull(omMultipartUploadCompleteInfo.getHash()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 28697379072e..f2efe84b9c2c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -20,6 +20,8 @@ import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.security.MessageDigest; +import 
java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.time.Instant; import java.util.ArrayList; @@ -38,6 +40,9 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.codec.digest.DigestUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec; @@ -137,7 +142,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.DEFAULT_OM_UPDATE_ID; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; @@ -161,6 +168,7 @@ import static org.slf4j.event.Level.DEBUG; import org.apache.ozone.test.tag.Unhealthy; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; @@ -194,6 +202,12 @@ public abstract class TestOzoneRpcClientAbstract { READ, ACCESS); private static OzoneAcl inheritedGroupAcl = new OzoneAcl(GROUP, remoteGroupName, READ, ACCESS); + private static MessageDigest eTagProvider; + + @BeforeAll + public static void initialize() throws NoSuchAlgorithmException { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } /** * Create a MiniOzoneCluster for testing. 
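For reference, a minimal sketch (not part of any patch in this series) of the multipart flow these tests exercise once parts carry an MD5-based ETag. The bucket handle and key name are illustrative assumptions; the helper mirrors the pattern used repeatedly in the tests above and below.

    import java.security.MessageDigest;
    import java.util.Map;
    import java.util.TreeMap;
    import javax.xml.bind.DatatypeConverter;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

    // Illustrative sketch only: upload one part whose ETag is the lower-case hex MD5
    // of its content, then complete the upload with a partNumber -> ETag map.
    static void uploadSinglePart(OzoneBucket bucket, String keyName, byte[] data) throws Exception {
      String uploadID = bucket.initiateMultipartUpload(keyName).getUploadID();

      OzoneOutputStream out = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
      out.write(data, 0, data.length);
      String eTag = DatatypeConverter.printHexBinary(
          MessageDigest.getInstance(OzoneConsts.MD5_HASH).digest(data)).toLowerCase();
      out.getMetadata().put(OzoneConsts.ETAG, eTag);   // ETag travels as key metadata
      out.close();

      Map<Integer, String> eTags = new TreeMap<>();
      eTags.put(1, out.getCommitUploadPartInfo().getETag());
      // Per the robot test earlier in this series, the completed key's ETag is the MD5 of
      // the concatenated part ETag strings, suffixed with "-<number of parts>".
      bucket.completeMultipartUpload(keyName, uploadID, eTags);
    }
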
@@ -1481,6 +1495,7 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); assertEquals(valueLength, store.getVolume(volumeName) @@ -2627,13 +2642,14 @@ void testUploadPartWithNoOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), 1, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); } @ParameterizedTest @@ -2661,6 +2677,7 @@ void testUploadPartOverride(ReplicationConfig replication) OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, sampleData.length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo commitUploadPartInfo = ozoneOutputStream @@ -2668,7 +2685,7 @@ void testUploadPartOverride(ReplicationConfig replication) assertNotNull(commitUploadPartInfo); String partName = commitUploadPartInfo.getPartName(); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // Overwrite the part by creating part key with same part number // and different content. @@ -2676,13 +2693,14 @@ void testUploadPartOverride(ReplicationConfig replication) ozoneOutputStream = bucket.createMultipartKey(keyName, sampleData.length(), partNumber, uploadID); ozoneOutputStream.write(string2Bytes(sampleData), 0, "name".length()); + ozoneOutputStream.getMetadata().put(ETAG, DigestUtils.md5Hex(sampleData)); ozoneOutputStream.close(); commitUploadPartInfo = ozoneOutputStream .getCommitUploadPartInfo(); assertNotNull(commitUploadPartInfo); - assertNotNull(commitUploadPartInfo.getPartName()); + assertNotNull(commitUploadPartInfo.getETag()); // AWS S3 for same content generates same partName during upload part. // In AWS S3 ETag is generated from md5sum. 
In Ozone right now we @@ -2808,12 +2826,13 @@ public void testMultipartUploadWithACL() throws Exception { // Upload part byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 1); - String partName = uploadPart(bucket, keyName2, uploadId, 1, data); - Map partsMap = new TreeMap<>(); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName2, + uploadId, 1, data); + Map eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, partNameAndETag.getValue()); // Complete multipart upload request - completeMultipartUpload(bucket2, keyName2, uploadId, partsMap); + completeMultipartUpload(bucket2, keyName2, uploadId, eTagsMaps); // User without permission cannot read multi-uploaded object OMException ex = assertThrows(OMException.class, () -> { @@ -2863,21 +2882,21 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { anyReplication()); // Upload Parts - Map partsMap = new TreeMap<>(); + Map eTagsMaps = new TreeMap<>(); // Uploading part 1 with less than min size - String partName = uploadPart(bucket, keyName, uploadID, 1, - "data".getBytes(UTF_8)); - partsMap.put(1, partName); + Pair partNameAndETag = uploadPart(bucket, keyName, + uploadID, 1, "data".getBytes(UTF_8)); + eTagsMaps.put(1, partNameAndETag.getValue()); - partName = uploadPart(bucket, keyName, uploadID, 2, + partNameAndETag = uploadPart(bucket, keyName, uploadID, 2, "data".getBytes(UTF_8)); - partsMap.put(2, partName); + eTagsMaps.put(2, partNameAndETag.getValue()); // Complete multipart upload OzoneTestUtils.expectOmException(ResultCodes.ENTITY_TOO_SMALL, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @Test @@ -2924,11 +2943,11 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. - TreeMap partsMap = new TreeMap<>(); - partsMap.put(1, UUID.randomUUID().toString()); + TreeMap eTagsMaps = new TreeMap<>(); + eTagsMaps.put(1, DigestUtils.md5Hex(UUID.randomUUID().toString())); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMaps)); } @@ -2950,11 +2969,11 @@ public void testMultipartUploadWithMissingParts() throws Exception { uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); // We have not uploaded any parts, but passing some list it should throw // error. 
- TreeMap partsMap = new TreeMap<>(); - partsMap.put(3, "random"); + TreeMap eTagsMap = new TreeMap<>(); + eTagsMap.put(3, DigestUtils.md5Hex("random")); OzoneTestUtils.expectOmException(ResultCodes.INVALID_PART, - () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap)); + () -> completeMultipartUpload(bucket, keyName, uploadID, eTagsMap)); } @Test @@ -3053,6 +3072,9 @@ void testCommitPartAfterCompleteUpload() throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = @@ -3061,10 +3083,13 @@ void testCommitPartAfterCompleteUpload() throws Exception { // Do not close output stream for part 2. ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID()); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.write(data, 0, data.length); Map partsMap = new LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -3134,17 +3159,17 @@ void testListMultipartUploadParts(ReplicationConfig replication) Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); @@ -3185,17 +3210,17 @@ void testListMultipartUploadPartsWithContinuation( Map partsMap = new TreeMap<>(); String uploadID = initiateMultipartUpload(bucket, keyName, replication); - String partName1 = uploadPart(bucket, keyName, uploadID, 1, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(1, partName1); + Pair partNameAndETag1 = uploadPart(bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(1, partNameAndETag1.getKey()); - String partName2 = uploadPart(bucket, keyName, uploadID, 2, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(2, partName2); + Pair partNameAndETag2 = uploadPart(bucket, keyName, + uploadID, 2, 
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(2, partNameAndETag2.getKey()); - String partName3 = uploadPart(bucket, keyName, uploadID, 3, - generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); - partsMap.put(3, partName3); + Pair partNameAndETag3 = uploadPart(bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); + partsMap.put(3, partNameAndETag3.getKey()); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); @@ -3632,19 +3657,20 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val, // than 5mb int length = 0; byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val); - String partName = uploadPart(bucket, keyName, uploadID, 1, data); - partsMap.put(1, partName); + Pair partNameAndEtag = uploadPart(bucket, keyName, uploadID, + 1, data); + partsMap.put(1, partNameAndEtag.getValue()); length += data.length; - partName = uploadPart(bucket, keyName, uploadID, 2, data); - partsMap.put(2, partName); + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 2, data); + partsMap.put(2, partNameAndEtag.getValue()); length += data.length; String part3 = UUID.randomUUID().toString(); - partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( + partNameAndEtag = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes( UTF_8)); - partsMap.put(3, partName); + partsMap.put(3, partNameAndEtag.getValue()); length += part3.getBytes(UTF_8).length; // Complete multipart upload request @@ -3701,20 +3727,26 @@ private String initiateMultipartUpload(OzoneBucket bucket, String keyName, return uploadID; } - private String uploadPart(OzoneBucket bucket, String keyName, String - uploadID, int partNumber, byte[] data) throws Exception { + private Pair uploadPart(OzoneBucket bucket, String keyName, + String uploadID, int partNumber, + byte[] data) throws Exception { OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, partNumber, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(ETAG, + DatatypeConverter.printHexBinary(eTagProvider.digest(data)) + .toLowerCase()); ozoneOutputStream.close(); OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo(); assertNotNull(omMultipartCommitUploadPartInfo); + assertNotNull(omMultipartCommitUploadPartInfo.getETag()); assertNotNull(omMultipartCommitUploadPartInfo.getPartName()); - return omMultipartCommitUploadPartInfo.getPartName(); + return Pair.of(omMultipartCommitUploadPartInfo.getPartName(), + omMultipartCommitUploadPartInfo.getETag()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index ffd80f359ff6..febb6fd41c2a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -24,12 +24,15 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.HashMap; import java.util.UUID; import java.util.concurrent.CountDownLatch; import 
java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; @@ -37,6 +40,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.FaultInjector; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -166,7 +170,8 @@ void testGetKeyAndFileWithNetworkTopology() throws IOException { } @Test - public void testMultiPartUploadWithStream() throws IOException { + public void testMultiPartUploadWithStream() + throws IOException, NoSuchAlgorithmException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); @@ -196,6 +201,9 @@ public void testMultiPartUploadWithStream() throws IOException { keyName, valueLength, 1, uploadID); ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0, valueLength); + ozoneStreamOutput.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(sampleData)).toLowerCase()); ozoneStreamOutput.close(); OzoneMultipartUploadPartListParts parts = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java index be2e0a96526e..9c7a0a7032bc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om; +import javax.xml.bind.DatatypeConverter; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; @@ -52,6 +53,8 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.Arrays; import java.util.LinkedHashMap; import java.util.Map; @@ -213,7 +216,8 @@ public void testMultiPartCompleteUpload() throws Exception { } private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( - OzoneBucket bucket, String keyName) throws IOException { + OzoneBucket bucket, String keyName) + throws IOException, NoSuchAlgorithmException { OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)); @@ -226,6 +230,9 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID); ozoneOutputStream.write(data, 0, data.length); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, + DatatypeConverter.printHexBinary(MessageDigest.getInstance(OzoneConsts.MD5_HASH) + .digest(data)).toLowerCase()); ozoneOutputStream.close(); if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { @@ -245,7 +252,7 @@ private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists( ozoneOutputStream.getCommitUploadPartInfo(); Map partsMap = new 
LinkedHashMap<>(); - partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName()); + partsMap.put(1, omMultipartCommitUploadPartInfo.getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java index ab9f6382f0e1..63202805ec57 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.ozone.om; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -24,6 +25,7 @@ import org.apache.hadoop.hdfs.LogVerificationAppender; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -187,11 +189,12 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); Map partsMap = new HashMap<>(); - partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName()); + partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getETag()); OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap); @@ -362,7 +365,7 @@ private void validateListParts(OzoneBucket ozoneBucket, String keyName, for (int i = 0; i < partsMap.size(); i++) { assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), - partInfoList.get(i).getPartName()); + partInfoList.get(i).getETag()); } @@ -379,9 +382,10 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); + ozoneOutputStream.getMetadata().put(OzoneConsts.ETAG, DigestUtils.md5Hex(value)); ozoneOutputStream.close(); - return ozoneOutputStream.getCommitUploadPartInfo().getPartName(); + return ozoneOutputStream.getCommitUploadPartInfo().getETag(); } @Test diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 6a1a51bc3222..5c737fdad928 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1584,8 +1584,9 @@ message MultipartCommitUploadPartRequest { } message MultipartCommitUploadPartResponse { - // This one is returned as Etag for S3. optional string partName = 1; + // This one is returned as Etag for S3. 
+ optional string eTag = 2; } message MultipartUploadCompleteRequest { @@ -1603,6 +1604,7 @@ message MultipartUploadCompleteResponse { message Part { required uint32 partNumber = 1; required string partName = 2; + optional string eTag = 3; } message MultipartUploadAbortRequest { @@ -1675,6 +1677,7 @@ message PartInfo { required string partName = 2; required uint64 modificationTime = 3; required uint64 size = 4; + optional string eTag = 5; } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index d932ed1eff58..2c9419e78d07 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -121,6 +121,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -824,7 +825,10 @@ public OmMultipartUploadListParts listParts(String volumeName, OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), - partKeyInfo.getPartKeyInfo().getDataSize()); + partKeyInfo.getPartKeyInfo().getDataSize(), + partKeyInfo.getPartKeyInfo().getMetadataList().stream() + .filter(keyValue -> keyValue.getKey().equals(ETAG)) + .findFirst().get().getValue()); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index f461bbd1719a..a3e7840ccce5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -238,9 +238,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn correctedSpace); omBucketInfo.incrUsedBytes(correctedSpace); - omResponse.setCommitMultiPartUploadResponse( - MultipartCommitUploadPartResponse.newBuilder() - .setPartName(partName)); + MultipartCommitUploadPartResponse.Builder commitResponseBuilder = MultipartCommitUploadPartResponse.newBuilder() + .setPartName(partName); + String eTag = omKeyInfo.getMetadata().get(OzoneConsts.ETAG); + if (eTag != null) { + commitResponseBuilder.setETag(eTag); + } + omResponse.setCommitMultiPartUploadResponse(commitResponseBuilder); omClientResponse = getOmClientResponse(ozoneManager, oldPartKeyInfo, openKey, omKeyInfo, multipartKey, multipartKeyInfo, omResponse.build(), diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 99c98e3b48b2..83b46de7fd1b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -27,6 +27,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.BiFunction; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.ratis.server.protocol.TermIndex; @@ -80,6 +82,32 @@ public class S3MultipartUploadCompleteRequest extends OMKeyRequest { private static final Logger LOG = LoggerFactory.getLogger(S3MultipartUploadCompleteRequest.class); + private BiFunction eTagBasedValidator = + (part, partKeyInfo) -> { + String eTag = part.getETag(); + AtomicReference dbPartETag = new AtomicReference<>(); + String dbPartName = null; + if (partKeyInfo != null) { + partKeyInfo.getPartKeyInfo().getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().ifPresent(kv -> dbPartETag.set(kv.getValue())); + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(eTag, partKeyInfo == null ? null : + dbPartETag.get(), StringUtils.equals(eTag, dbPartETag.get()) || StringUtils.equals(eTag, dbPartName)); + }; + private BiFunction partNameBasedValidator = + (part, partKeyInfo) -> { + String partName = part.getPartName(); + String dbPartName = null; + if (partKeyInfo != null) { + dbPartName = partKeyInfo.getPartName(); + } + return new MultipartCommitRequestPart(partName, partKeyInfo == null ? null : + dbPartName, StringUtils.equals(partName, dbPartName)); + }; + public S3MultipartUploadCompleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { super(omRequest, bucketLayout); @@ -249,7 +277,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .setVolume(requestedVolume) .setBucket(requestedBucket) .setKey(keyName) - .setHash(omKeyInfo.getMetadata().get("ETag"))); + .setHash(omKeyInfo.getMetadata().get(OzoneConsts.ETAG))); long volumeId = omMetadataManager.getVolumeId(volumeName); long bucketId = omMetadataManager.getBucketId(volumeName, bucketName); @@ -389,7 +417,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, .setOmKeyLocationInfos( Collections.singletonList(keyLocationInfoGroup)) .setAcls(dbOpenKeyInfo.getAcls()) - .addMetadata("ETag", + .addMetadata(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); // Check if db entry has ObjectID. 
This check is required because // it is possible that between multipart key uploads and complete, @@ -419,7 +447,7 @@ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long trxnLogIndex, omKeyInfo.setModificationTime(keyArgs.getModificationTime()); omKeyInfo.setDataSize(dataSize); omKeyInfo.setReplicationConfig(dbOpenKeyInfo.getReplicationConfig()); - omKeyInfo.getMetadata().put("ETag", + omKeyInfo.getMetadata().put(OzoneConsts.ETAG, multipartUploadedKeyHash(partKeyInfoMap)); } omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); @@ -491,24 +519,19 @@ private long getMultipartDataSize(String requestedVolume, OzoneManager ozoneManager) throws OMException { long dataSize = 0; int currentPartCount = 0; + boolean eTagBasedValidationAvailable = partsList.stream().allMatch(OzoneManagerProtocolProtos.Part::hasETag); // Now do actual logic, and check for any Invalid part during this. for (OzoneManagerProtocolProtos.Part part : partsList) { currentPartCount++; int partNumber = part.getPartNumber(); - String partName = part.getPartName(); - PartKeyInfo partKeyInfo = partKeyInfoMap.get(partNumber); - - String dbPartName = null; - if (partKeyInfo != null) { - dbPartName = partKeyInfo.getPartName(); - } - if (!StringUtils.equals(partName, dbPartName)) { - String omPartName = partKeyInfo == null ? null : dbPartName; + MultipartCommitRequestPart requestPart = eTagBasedValidationAvailable ? + eTagBasedValidator.apply(part, partKeyInfo) : partNameBasedValidator.apply(part, partKeyInfo); + if (!requestPart.isValid()) { throw new OMException( failureMessage(requestedVolume, requestedBucket, keyName) + - ". Provided Part info is { " + partName + ", " + partNumber + - "}, whereas OM has partName " + omPartName, + ". Provided Part info is { " + requestPart.getRequestPartId() + ", " + partNumber + + "}, whereas OM has eTag " + requestPart.getOmPartId(), OMException.ResultCodes.INVALID_PART); } @@ -641,11 +664,41 @@ private String multipartUploadedKeyHash( OmMultipartKeyInfo.PartKeyInfoMap partsList) { StringBuffer keysConcatenated = new StringBuffer(); for (PartKeyInfo partKeyInfo: partsList) { - keysConcatenated.append(KeyValueUtil.getFromProtobuf(partKeyInfo - .getPartKeyInfo().getMetadataList()).get("ETag")); + String partPropertyToComputeHash = KeyValueUtil.getFromProtobuf(partKeyInfo.getPartKeyInfo().getMetadataList()) + .get(OzoneConsts.ETAG); + if (partPropertyToComputeHash == null) { + partPropertyToComputeHash = partKeyInfo.getPartName(); + } + keysConcatenated.append(partPropertyToComputeHash); } return DigestUtils.md5Hex(keysConcatenated.toString()) + "-" + partsList.size(); } + private static class MultipartCommitRequestPart { + private String requestPartId; + + private String omPartId; + + private boolean isValid; + + MultipartCommitRequestPart(String requestPartId, String omPartId, boolean isValid) { + this.requestPartId = requestPartId; + this.omPartId = omPartId; + this.isValid = isValid; + } + + public String getRequestPartId() { + return requestPartId; + } + + public String getOmPartId() { + return omPartId; + } + + public boolean isValid() { + return isValid; + } + } + } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 21b94ce5f05a..1bd642fce7d6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -19,12 +19,19 @@ package org.apache.hadoop.ozone.om.request; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.security.DigestInputStream; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.UUID; +import javax.xml.bind.DatatypeConverter; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; @@ -1005,14 +1012,31 @@ public static OMRequest createCommitPartMPURequest(String volumeName, String bucketName, String keyName, long clientID, long size, String multipartUploadID, int partNumber) { + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + // Just set dummy size. - KeyArgs.Builder keyArgs = - KeyArgs.newBuilder().setVolumeName(volumeName).setKeyName(keyName) - .setBucketName(bucketName) - .setDataSize(size) - .setMultipartNumber(partNumber) - .setMultipartUploadID(multipartUploadID) - .addAllKeyLocations(new ArrayList<>()); + KeyArgs.Builder keyArgs = KeyArgs.newBuilder().setVolumeName(volumeName) + .setKeyName(keyName) + .setBucketName(bucketName) + .setDataSize(size) + .setMultipartNumber(partNumber) + .setMultipartUploadID(multipartUploadID) + .addAllKeyLocations(new ArrayList<>()) + .addMetadata(HddsProtos.KeyValue.newBuilder() + .setKey(OzoneConsts.ETAG) + .setValue(DatatypeConverter.printHexBinary( + new DigestInputStream( + new ByteArrayInputStream( + RandomStringUtils.randomAlphanumeric((int) size) + .getBytes(StandardCharsets.UTF_8)), + eTagProvider) + .getMessageDigest().digest())) + .build()); // Just adding dummy list. As this is for UT only. 
MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 0a1ce8f7246f..34e32b0e182a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -133,9 +134,14 @@ private String checkValidateAndUpdateCacheSuccess(String volumeName, List partList = new ArrayList<>(); - String partName = getPartName(volumeName, bucketName, keyName, - multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1) + String eTag = s3MultipartUploadCommitPartRequest.getOmRequest() + .getCommitMultiPartUploadRequest() + .getKeyArgs() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(OzoneConsts.ETAG)) + .findFirst().get().getValue(); + partList.add(Part.newBuilder().setETag(eTag).setPartName(eTag).setPartNumber(1) .build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -223,10 +229,10 @@ public void testInvalidPartOrderError() throws Exception { String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(23).build()); partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 1); - partList.add(Part.newBuilder().setPartName(partName).setPartNumber(1).build()); + partList.add(Part.newBuilder().setETag(partName).setPartName(partName).setPartNumber(1).build()); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, bucketName, keyName, multipartUploadID, partList); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 23b543b6ec12..51963a00a1cb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -287,7 +287,7 @@ public S3MultipartUploadCommitPartResponse createS3CommitMPUResponseFSO( .setStatus(status).setSuccess(true) .setCommitMultiPartUploadResponse( OzoneManagerProtocolProtos.MultipartCommitUploadPartResponse - .newBuilder().setPartName(volumeName)).build(); + .newBuilder().setETag(volumeName).setPartName(volumeName)).build(); return new S3MultipartUploadCommitPartResponseWithFSO(omResponse, multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java index 762d8740565f..9fc0f5c0c12d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java @@ -19,11 +19,13 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmTestManagers; @@ -241,6 +243,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setMultipartUploadID(omMultipartInfo.getUploadID()) .setMultipartUploadPartNumber(i) .setAcls(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, + DigestUtils.md5Hex(UUID.randomUUID().toString())) .setReplicationConfig( StandaloneReplicationConfig.getInstance(ONE)) .setLocationInfoList(Collections.emptyList()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java index 418608e8559c..2ef6c341ae73 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om.service; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -27,6 +28,7 @@ import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.ExpiredOpenKeys; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -473,6 +475,8 @@ private void createIncompleteMPUKey(String volumeName, String bucketName, .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setLocationInfoList(Collections.emptyList()) + .addMetadata(OzoneConsts.ETAG, DigestUtils.md5Hex(UUID.randomUUID() + .toString())) .build(); writeClient.commitMultipartUploadPart(commitPartKeyArgs, diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java index 47b59cfcc0e8..8ae48ca4f83e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/commontypes/KeyMetadata.java @@ -21,6 +21,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import 
javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; +import org.apache.hadoop.ozone.OzoneConsts; + import java.time.Instant; /** @@ -37,7 +39,7 @@ public class KeyMetadata { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; @XmlElement(name = "Size") diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java index 72289470c2ca..af5eafc9f438 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadRequest.java @@ -23,6 +23,8 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; + import java.util.ArrayList; import java.util.List; @@ -55,7 +57,7 @@ public static class Part { @XmlElement(name = "PartNumber") private int partNumber; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public int getPartNumber() { @@ -66,12 +68,12 @@ public void setPartNumber(int partNumber) { this.partNumber = partNumber; } - public String geteTag() { + public String getETag() { return eTag; } - public void seteTag(String eTag) { - this.eTag = eTag; + public void setETag(String eTagHash) { + this.eTag = eTagHash; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java index c636f36b175b..2aa30d6b839b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CompleteMultipartUploadResponse.java @@ -22,6 +22,7 @@ import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; +import org.apache.hadoop.ozone.OzoneConsts; /** * Complete Multipart Upload request response. 
@@ -41,7 +42,7 @@ public class CompleteMultipartUploadResponse { @XmlElement(name = "Key") private String key; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public String getLocation() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java index 6e114c2e0c64..d1136fe9ed78 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyObjectResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -39,7 +40,7 @@ public class CopyObjectResponse { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java index c4e65aa38ff7..ab30c1f0e7c9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/CopyPartResult.java @@ -25,6 +25,7 @@ import javax.xml.bind.annotation.adapters.XmlJavaTypeAdapter; import java.time.Instant; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; /** @@ -39,7 +40,7 @@ public class CopyPartResult { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; public CopyPartResult() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index 5694d6f9f41b..5810c4ec2a2f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -65,6 +65,7 @@ import org.slf4j.LoggerFactory; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.KB; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; import static org.apache.hadoop.ozone.s3.util.S3Consts.CUSTOM_METADATA_HEADER_PREFIX; @@ -74,8 +75,6 @@ */ public abstract class EndpointBase implements Auditor { - protected static final String ETAG = "ETag"; - protected static final String ETAG_CUSTOM = "etag-custom"; @Inject diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java index fc9da14133c8..8f3fad735441 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListPartsResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.s3.endpoint; +import org.apache.hadoop.ozone.OzoneConsts; 
import org.apache.hadoop.ozone.s3.commontypes.IsoDateAdapter; import javax.xml.bind.annotation.XmlAccessType; @@ -154,7 +155,7 @@ public static class Part { @XmlElement(name = "LastModified") private Instant lastModified; - @XmlElement(name = "ETag") + @XmlElement(name = OzoneConsts.ETAG) private String eTag; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1e247c8eb858..4a36ad9e62a8 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -101,6 +101,7 @@ import java.util.OptionalLong; import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH; +import static javax.ws.rs.core.HttpHeaders.ETAG; import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; @@ -150,7 +151,7 @@ public class ObjectEndpoint extends EndpointBase { static { E_TAG_PROVIDER = ThreadLocal.withInitial(() -> { try { - return MessageDigest.getInstance("Md5"); + return MessageDigest.getInstance(OzoneConsts.MD5_HASH); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } @@ -807,7 +808,7 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo; try { for (CompleteMultipartUploadRequest.Part part : partList) { - partsMap.put(part.getPartNumber(), part.geteTag()); + partsMap.put(part.getPartNumber(), part.getETag()); } if (LOG.isDebugEnabled()) { LOG.debug("Parts map {}", partsMap); @@ -955,6 +956,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge( sourceObject, ozoneOutputStream, 0, length); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } else { @@ -964,6 +967,8 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, metadataLatencyNs = getMetrics().updateCopyKeyMetadataStats(startNanos); copyLength = IOUtils.copyLarge(sourceObject, ozoneOutputStream); + ozoneOutputStream.getMetadata() + .putAll(sourceKeyDetails.getMetadata()); keyOutputStream = ozoneOutputStream.getKeyOutputStream(); } } @@ -993,7 +998,7 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, assert keyOutputStream != null; OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = keyOutputStream.getCommitUploadPartInfo(); - String eTag = omMultipartCommitUploadPartInfo.getPartName(); + String eTag = omMultipartCommitUploadPartInfo.getETag(); if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1064,7 +1069,7 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getPartName()); + part.setETag(partInfo.getETag()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index e509acb05bdb..bbb743ee3597 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -21,12 +21,11 @@ import javax.xml.bind.DatatypeConverter; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; @@ -110,7 +109,7 @@ public static Pair putKeyWithStream( eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) .toLowerCase(); perf.appendMetaLatencyNanos(metadataLatencyNs); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return Pair.of(eTag, writeLen); } @@ -161,11 +160,6 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, throws IOException, OS3Exception { long startNanos = Time.monotonicNowNanos(); String eTag; - // OmMultipartCommitUploadPartInfo can only be gotten after the - // OzoneDataStreamOutput is closed, so we need to save the - // KeyDataStreamOutput in the OzoneDataStreamOutput and use it to get the - // OmMultipartCommitUploadPartInfo after OzoneDataStreamOutput is closed. 
- KeyDataStreamOutput keyDataStreamOutput = null; try { try (OzoneDataStreamOutput streamOutput = ozoneBucket .createMultipartStreamKey(key, length, partNumber, uploadID)) { @@ -174,11 +168,10 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, writeToStreamOutput(streamOutput, body, chunkSize, length); eTag = DatatypeConverter.printHexBinary( body.getMessageDigest().digest()).toLowerCase(); - ((KeyMetadataAware)streamOutput).getMetadata().put("ETag", eTag); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); METRICS.incPutKeySuccessLength(putLength); perf.appendMetaLatencyNanos(metadataLatencyNs); perf.appendSizeBytes(putLength); - keyDataStreamOutput = streamOutput.getKeyDataStreamOutput(); } } catch (OMException ex) { if (ex.getResult() == @@ -190,13 +183,7 @@ public static Response createMultipartKey(OzoneBucket ozoneBucket, String key, ozoneBucket.getName() + "/" + key); } throw ex; - } finally { - if (keyDataStreamOutput != null) { - OmMultipartCommitUploadPartInfo commitUploadPartInfo = - keyDataStreamOutput.getCommitUploadPartInfo(); - eTag = commitUploadPartInfo.getPartName(); - } } - return Response.ok().header("ETag", eTag).build(); + return Response.ok().header(OzoneConsts.ETAG, eTag).build(); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index fad3386c61c4..39ae9cc4af17 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -23,6 +23,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -32,6 +34,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import javax.xml.bind.DatatypeConverter; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -53,6 +56,8 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.util.Time; +import static org.apache.hadoop.ozone.OzoneConsts.ETAG; +import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** @@ -267,7 +272,8 @@ public void close() throws IOException { byte[] bytes = new byte[position]; buffer.get(bytes); - Part part = new Part(key + size, bytes); + Part part = new Part(key + size, bytes, + getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -425,7 +431,7 @@ public OzoneOutputStream createMultipartKey(String key, long size, @Override public void close() throws IOException { Part part = new Part(key + size, - toByteArray()); + toByteArray(), getMetadata().get(ETAG)); if (partList.get(key) == null) { Map parts = new TreeMap<>(); parts.put(partNumber, part); @@ -463,7 +469,7 @@ public OmMultipartUploadCompleteInfo completeMultipartUpload(String key, for (Map.Entry part: partsMap.entrySet()) { Part recordedPart = partsList.get(part.getKey()); if (recordedPart == null || - !recordedPart.getPartName().equals(part.getValue())) { + 
!recordedPart.getETag().equals(part.getValue())) { throw new OMException(ResultCodes.INVALID_PART); } else { output.write(recordedPart.getContent()); @@ -506,13 +512,21 @@ public OzoneMultipartUploadPartListParts listParts(String key, int count = 0; int nextPartNumberMarker = 0; boolean truncated = false; + MessageDigest eTagProvider; + try { + eTagProvider = MessageDigest.getInstance(MD5_HASH); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } while (count < maxParts && partIterator.hasNext()) { Map.Entry partEntry = partIterator.next(); nextPartNumberMarker = partEntry.getKey(); if (partEntry.getKey() > partNumberMarker) { PartInfo partInfo = new PartInfo(partEntry.getKey(), partEntry.getValue().getPartName(), - Time.now(), partEntry.getValue().getContent().length); + Time.now(), partEntry.getValue().getContent().length, + DatatypeConverter.printHexBinary(eTagProvider.digest(partEntry + .getValue().getContent())).toLowerCase()); partInfoList.add(partInfo); count++; } @@ -563,9 +577,12 @@ public static class Part { private String partName; private byte[] content; - public Part(String name, byte[] data) { + private String eTag; + + public Part(String name, byte[] data, String eTag) { this.partName = name; this.content = data.clone(); + this.eTag = eTag; } public String getPartName() { @@ -575,6 +592,11 @@ public String getPartName() { public byte[] getContent() { return content.clone(); } + + public String getETag() { + return eTag; + } + } @Override diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index 7bb35682d8da..b472320b7fe7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.client; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -65,6 +66,7 @@ public synchronized void close() throws IOException { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? 
new OmMultipartCommitUploadPartInfo(partName, + getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index 983516002909..da2fb26ec8f5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -22,6 +22,8 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.io.KeyMetadataAware; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.StreamBufferArgs; import org.apache.hadoop.ozone.client.io.KeyOutputStream; @@ -93,7 +95,8 @@ public KeyOutputStream getKeyOutputStream() { @Override public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return closed ? new OmMultipartCommitUploadPartInfo(partName) : null; + return closed ? new OmMultipartCommitUploadPartInfo(partName, + ((KeyMetadataAware)getOutputStream()).getMetadata().get(OzoneConsts.ETAG)) : null; } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java index ab87f9c98e11..cd0fbfed4e65 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestCompleteMultipartUploadRequestUnmarshaller.java @@ -80,8 +80,8 @@ private void checkContent(CompleteMultipartUploadRequest request) { List parts = request.getPartList(); - assertEquals(part1, parts.get(0).geteTag()); - assertEquals(part2, parts.get(1).geteTag()); + assertEquals(part1, parts.get(0).getETag()); + assertEquals(part2, parts.get(1).getETag()); } private CompleteMultipartUploadRequest unmarshall( diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 3e8beb2c3a1e..677367e6d812 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -79,17 +79,17 @@ public static void setUp() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 2, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 3, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index eedee2855e7d..3c0c87a177f6 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -93,9 +93,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -202,7 +202,7 @@ public void testMultipartInvalidPartError() throws Exception { Part part1 = uploadPart(key, uploadID, partNumber, content); // Change part name. - part1.seteTag("random"); + part1.setETag("random"); partsList.add(part1); content = "Multipart Upload 2"; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index a773b8757981..d9595aeff796 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.Scanner; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -91,7 +92,11 @@ public static void setUp() throws Exception { try (OutputStream stream = bucket .createKey(EXISTING_KEY, keyContent.length, ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, - ReplicationFactor.THREE), new HashMap<>())) { + ReplicationFactor.THREE), + new HashMap() {{ + put(OzoneConsts.ETAG, DigestUtils.md5Hex(EXISTING_KEY_CONTENT)); + }} + )) { stream.write(keyContent); } @@ -327,9 +332,9 @@ private Part uploadPart(String key, String uploadID, int partNumber, String Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); Part part = new Part(); - part.seteTag(response.getHeaderString("ETag")); + part.setETag(response.getHeaderString(OzoneConsts.ETAG)); part.setPartNumber(partNumber); return part; @@ -377,7 +382,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, assertNotNull(result.getETag()); assertNotNull(result.getLastModified()); Part part = new Part(); - part.seteTag(result.getETag()); + part.setETag(result.getETag()); part.setPartNumber(partNumber); return part; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 90d490dea0b6..bb1b7037bd9a 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -90,7 +90,7 @@ public void testPartUpload() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -112,16 +112,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 787aa6e8777a..775d5a197693 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -95,7 +96,7 @@ public void testPartUpload() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); } @@ -116,16 +117,16 @@ public void testPartUploadWithOverride() throws Exception { response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); - String eTag = response.getHeaderString("ETag"); + String eTag = response.getHeaderString(OzoneConsts.ETAG); // Upload part again with same part Number, the ETag should be changed. content = "Multipart Upload Changed"; response = REST.put(S3BUCKET, S3KEY, content.length(), 1, uploadID, body); - assertNotNull(response.getHeaderString("ETag")); - assertNotEquals(eTag, response.getHeaderString("ETag")); + assertNotNull(response.getHeaderString(OzoneConsts.ETAG)); + assertNotEquals(eTag, response.getHeaderString(OzoneConsts.ETAG)); } From 3c4683e71ba0de9aa73ed84efade7b5e13a7d14e Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Tue, 13 Feb 2024 18:53:20 +0530 Subject: [PATCH 023/108] HDDS-9738. 
Display startTime, pipeline and container counts for decommissioning datanode (#6185) --- .../hadoop/hdds/scm/client/ScmClient.java | 2 + .../StorageContainerLocationProtocol.java | 2 + ...ocationProtocolClientSideTranslatorPB.java | 11 + .../src/main/proto/ScmAdminProtocol.proto | 11 + .../apache/hadoop/hdds/scm/FetchMetrics.java | 220 ++++++++++++++++++ ...ocationProtocolServerSideTranslatorPB.java | 12 + .../scm/server/SCMClientProtocolServer.java | 7 + .../hdds/scm/node/TestFetchMetrics.java | 46 ++++ .../scm/cli/ContainerOperationClient.java | 5 + .../DecommissionStatusSubCommand.java | 45 ++++ .../TestDecommissionStatusSubCommand.java | 83 +++++-- 11 files changed, 421 insertions(+), 23 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 402398e36c3f..fb5a2deee26d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -452,4 +452,6 @@ StatusAndMessages queryUpgradeFinalizationProgress( DecommissionScmResponseProto decommissionScm( String scmId) throws IOException; + + String getMetrics(String query) throws IOException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index e8bddb42cfbd..663f317a3b3b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -474,4 +474,6 @@ List getListOfContainers( DecommissionScmResponseProto decommissionScm( String scmId) throws IOException; + + String getMetrics(String query) throws IOException; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 84a0fa4886ce..109358c67bf6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -68,6 +68,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; @@ -1143,4 +1145,13 @@ public DecommissionScmResponseProto decommissionScm( .getDecommissionScmResponse(); return response; } + + @Override + public String getMetrics(String query) throws IOException { + GetMetricsRequestProto request = GetMetricsRequestProto.newBuilder().setQuery(query).build(); + GetMetricsResponseProto response = submitRequest(Type.GetMetrics, + builder -> builder.setGetMetricsRequest(request)).getGetMetricsResponse(); + String metricsJsonStr = response.getMetricsJson(); + return metricsJsonStr; + } } diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 6adca817ed1d..e8b8d623942a 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -83,6 +83,7 @@ message ScmContainerLocationRequest { optional DecommissionScmRequestProto decommissionScmRequest = 44; optional SingleNodeQueryRequestProto singleNodeQueryRequest = 45; optional GetContainersOnDecomNodeRequestProto getContainersOnDecomNodeRequest = 46; + optional GetMetricsRequestProto getMetricsRequest = 47; } message ScmContainerLocationResponse { @@ -137,6 +138,7 @@ message ScmContainerLocationResponse { optional DecommissionScmResponseProto decommissionScmResponse = 44; optional SingleNodeQueryResponseProto singleNodeQueryResponse = 45; optional GetContainersOnDecomNodeResponseProto getContainersOnDecomNodeResponse = 46; + optional GetMetricsResponseProto getMetricsResponse = 47; enum Status { OK = 1; @@ -190,6 +192,7 @@ enum Type { DecommissionScm = 40; SingleNodeQuery = 41; GetContainersOnDecomNode = 42; + GetMetrics = 43; } /** @@ -618,6 +621,14 @@ message GetContainersOnDecomNodeResponseProto { repeated ContainersOnDecomNodeProto containersOnDecomNode = 1; } +message GetMetricsRequestProto { + optional string query = 1; +} + +message GetMetricsResponseProto { + optional string metricsJson = 1; +} + /** * Protocol used from an HDFS node to StorageContainerManager. See the request * and response messages for details of the RPC calls. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java new file mode 100644 index 000000000000..0778b9a30dc3 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/FetchMetrics.java @@ -0,0 +1,220 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import com.fasterxml.jackson.core.JsonEncoding; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.reflect.Array; +import java.nio.charset.StandardCharsets; +import java.util.Iterator; +import java.util.Set; +import javax.management.AttributeNotFoundException; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.MBeanAttributeInfo; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; +import javax.management.QueryExp; +import javax.management.ReflectionException; +import javax.management.RuntimeErrorException; +import javax.management.RuntimeMBeanException; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.TabularData; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Class used to fetch metrics from MBeanServer. + */ +public class FetchMetrics { + private static final Logger LOG = LoggerFactory.getLogger(FetchMetrics.class); + private transient MBeanServer mBeanServer; + private transient JsonFactory jsonFactory; + + public FetchMetrics() { + this.mBeanServer = ManagementFactory.getPlatformMBeanServer(); + this.jsonFactory = new JsonFactory(); + } + + public String getMetrics(String qry) { + try { + JsonGenerator jg = null; + ByteArrayOutputStream opStream = new ByteArrayOutputStream(); + + try { + jg = this.jsonFactory.createGenerator(opStream, JsonEncoding.UTF8); + jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); + jg.useDefaultPrettyPrinter(); + jg.writeStartObject(); + if (qry == null) { + qry = "*:*"; + } + this.listBeans(jg, new ObjectName(qry)); + } finally { + if (jg != null) { + jg.close(); + } + } + return new String(opStream.toByteArray(), StandardCharsets.UTF_8); + } catch (IOException | MalformedObjectNameException ex) { + LOG.error("Caught an exception while processing getMetrics request", ex); + } + return null; + } + + private void listBeans(JsonGenerator jg, ObjectName qry) + throws IOException { + LOG.debug("Listing beans for " + qry); + Set names = null; + names = this.mBeanServer.queryNames(qry, (QueryExp) null); + jg.writeArrayFieldStart("beans"); + Iterator it = names.iterator(); + + while (it.hasNext()) { + ObjectName oname = (ObjectName) it.next(); + String code = ""; + + MBeanInfo minfo; + try { + minfo = this.mBeanServer.getMBeanInfo(oname); + code = minfo.getClassName(); + String prs = ""; + + try { + if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) { + prs = "modelerType"; + code = (String) this.mBeanServer.getAttribute(oname, prs); + } + } catch (AttributeNotFoundException | MBeanException | RuntimeException | ReflectionException ex) { + LOG.error("getting attribute " + prs + " of " + oname + " threw an exception", ex); + } + } catch (InstanceNotFoundException var17) { + 
continue; + } catch (IntrospectionException | ReflectionException ex) { + LOG.error("Problem while trying to process JMX query: " + qry + " with MBean " + oname, ex); + continue; + } + jg.writeStartObject(); + jg.writeStringField("name", oname.toString()); + jg.writeStringField("modelerType", code); + MBeanAttributeInfo[] attrs = minfo.getAttributes(); + for (int i = 0; i < attrs.length; ++i) { + this.writeAttribute(jg, oname, attrs[i]); + } + jg.writeEndObject(); + } + jg.writeEndArray(); + } + + private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr) throws IOException { + if (attr.isReadable()) { + String attName = attr.getName(); + if (!"modelerType".equals(attName)) { + if (attName.indexOf("=") < 0 && attName.indexOf(":") < 0 && attName.indexOf(" ") < 0) { + Object value = null; + + try { + value = this.mBeanServer.getAttribute(oname, attName); + } catch (RuntimeMBeanException var7) { + if (var7.getCause() instanceof UnsupportedOperationException) { + LOG.debug("getting attribute " + attName + " of " + oname + " threw an exception", var7); + } else { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", var7); + } + return; + } catch (RuntimeErrorException var8) { + LOG.error("getting attribute {} of {} threw an exception", new Object[]{attName, oname, var8}); + return; + } catch (MBeanException | RuntimeException | ReflectionException ex) { + LOG.error("getting attribute " + attName + " of " + oname + " threw an exception", ex); + return; + } catch (AttributeNotFoundException | InstanceNotFoundException ex) { + return; + } + this.writeAttribute(jg, attName, value); + } + } + } + } + + private void writeAttribute(JsonGenerator jg, String attName, Object value) throws IOException { + jg.writeFieldName(attName); + this.writeObject(jg, value); + } + + private void writeObject(JsonGenerator jg, Object value) throws IOException { + if (value == null) { + jg.writeNull(); + } else { + Class c = value.getClass(); + Object entry; + if (c.isArray()) { + jg.writeStartArray(); + int len = Array.getLength(value); + + for (int j = 0; j < len; ++j) { + entry = Array.get(value, j); + this.writeObject(jg, entry); + } + + jg.writeEndArray(); + } else if (value instanceof Number) { + Number n = (Number) value; + jg.writeNumber(n.toString()); + } else if (value instanceof Boolean) { + Boolean b = (Boolean) value; + jg.writeBoolean(b); + } else if (value instanceof CompositeData) { + CompositeData cds = (CompositeData) value; + CompositeType comp = cds.getCompositeType(); + Set keys = comp.keySet(); + jg.writeStartObject(); + Iterator var7 = keys.iterator(); + + while (var7.hasNext()) { + String key = (String) var7.next(); + this.writeAttribute(jg, key, cds.get(key)); + } + + jg.writeEndObject(); + } else if (value instanceof TabularData) { + TabularData tds = (TabularData) value; + jg.writeStartArray(); + Iterator var14 = tds.values().iterator(); + + while (var14.hasNext()) { + entry = var14.next(); + this.writeObject(jg, entry); + } + jg.writeEndArray(); + } else { + jg.writeString(value.toString()); + } + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index f402b9309fe4..a44536bf4463 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -72,6 +72,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetMetricsResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; @@ -714,6 +716,12 @@ public ScmContainerLocationResponse processRequest( .setDecommissionScmResponse(decommissionScm( request.getDecommissionScmRequest())) .build(); + case GetMetrics: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetMetricsResponse(getMetrics(request.getGetMetricsRequest())) + .build(); default: throw new IllegalArgumentException( "Unknown command type: " + request.getCmdType()); @@ -1287,4 +1295,8 @@ public DecommissionScmResponseProto decommissionScm( return impl.decommissionScm( request.getScmId()); } + + public GetMetricsResponseProto getMetrics(GetMetricsRequestProto request) throws IOException { + return GetMetricsResponseProto.newBuilder().setMetricsJson(impl.getMetrics(request.getQuery())).build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 13bef8590b79..faee4fcaaab7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -63,6 +63,7 @@ import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; +import org.apache.hadoop.hdds.scm.FetchMetrics; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -1373,4 +1374,10 @@ public DecommissionScmResponseProto decommissionScm( } return decommissionScmResponseBuilder.build(); } + + @Override + public String getMetrics(String query) throws IOException { + FetchMetrics fetchMetrics = new FetchMetrics(); + return fetchMetrics.getMetrics(query); + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java new file mode 100644 index 000000000000..ede005745e5e --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestFetchMetrics.java @@ -0,0 +1,46 
@@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.node; + +import org.apache.hadoop.hdds.scm.FetchMetrics; +import org.junit.jupiter.api.Test; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +class TestFetchMetrics { + private static FetchMetrics fetchMetrics = new FetchMetrics(); + + @Test + public void testFetchAll() { + String result = fetchMetrics.getMetrics(null); + Pattern p = Pattern.compile("beans", Pattern.MULTILINE); + Matcher m = p.matcher(result); + assertTrue(m.find()); + } + + @Test + public void testFetchFiltered() { + String result = fetchMetrics.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + Pattern p = Pattern.compile("beans", Pattern.MULTILINE); + Matcher m = p.matcher(result); + assertTrue(m.find()); + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index d07e696e7ef0..6a5550e9fbd3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -563,4 +563,9 @@ public DecommissionScmResponseProto decommissionScm( return storageContainerLocationClient.decommissionScm(scmId); } + @Override + public String getMetrics(String query) throws IOException { + return storageContainerLocationClient.getMetrics(query); + } + } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index b53632f8eec5..17d577ff2dc7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -17,6 +17,10 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.base.Strings; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -27,6 +31,9 @@ import picocli.CommandLine; import java.io.IOException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Date; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -81,17 +88,55 @@ public void execute(ScmClient scmClient) throws IOException { decommissioningNodes.size() + " node(s)"); } + String metricsJson = scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics"); + int numDecomNodes = -1; + JsonNode jsonNode = null; + if (metricsJson != null) { + ObjectMapper objectMapper = new ObjectMapper(); + JsonFactory factory = objectMapper.getFactory(); + JsonParser parser = factory.createParser(metricsJson); + jsonNode = (JsonNode) 
objectMapper.readTree(parser).get("beans").get(0); + JsonNode totalDecom = jsonNode.get("DecommissioningMaintenanceNodesTotal"); + numDecomNodes = (totalDecom == null ? -1 : Integer.parseInt(totalDecom.toString())); + } + for (HddsProtos.Node node : decommissioningNodes) { DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf( node.getNodeID()); printDetails(datanode); + printCounts(datanode, jsonNode, numDecomNodes); Map> containers = scmClient.getContainersOnDecomNode(datanode); System.out.println(containers); } } + private void printDetails(DatanodeDetails datanode) { System.out.println("\nDatanode: " + datanode.getUuid().toString() + " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress() + "/" + datanode.getHostName() + ")"); } + + private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecomNodes) { + try { + for (int i = 1; i <= numDecomNodes; i++) { + if (datanode.getHostName().equals(counts.get("tag.datanode." + i).asText())) { + int pipelines = Integer.parseInt(counts.get("PipelinesWaitingToCloseDN." + i).toString()); + double underReplicated = Double.parseDouble(counts.get("UnderReplicatedDN." + i).toString()); + double unclosed = Double.parseDouble(counts.get("UnclosedContainersDN." + i).toString()); + long startTime = Long.parseLong(counts.get("StartTimeDN." + i).toString()); + System.out.print("Decommission started at : "); + Date date = new Date(startTime); + DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); + System.out.println(formatter.format(date)); + System.out.println("No. of Pipelines: " + pipelines); + System.out.println("No. of UnderReplicated containers: " + underReplicated); + System.out.println("No. of Unclosed Containers: " + unclosed); + return; + } + } + System.err.println("Error getting pipeline and container counts for " + datanode.getHostName()); + } catch (NullPointerException ex) { + System.err.println("Error getting pipeline and container counts for " + datanode.getHostName()); + } + } } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index 41c31caf1f0a..ad0323d334e6 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -60,6 +60,7 @@ public class TestDecommissionStatusSubCommand { private DecommissionStatusSubCommand cmd; private List nodes = getNodeDetails(2); private Map> containerOnDecom = getContainersOnDecomNodes(); + private ArrayList metrics = getMetrics(); @BeforeEach public void setup() throws UnsupportedEncodingException { @@ -80,6 +81,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -91,15 +93,17 @@ public void testSuccessWhenDecommissionStatus() throws IOException { p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", 
Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)"); + p = Pattern.compile("No\\. of Pipelines:"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + assertTrue(m.find()); // metrics for both are shown + p = Pattern.compile("UnderReplicated=.* UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); + assertTrue(m.find()); // container lists for both are shown } @Test @@ -109,6 +113,7 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenReturn(new ArrayList<>()); when(scmClient.getContainersOnDecomNode(any())).thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(0)); cmd.execute(scmClient); Pattern p = Pattern.compile("Decommission\\sStatus:\\s" + @@ -117,10 +122,10 @@ public void testNoNodesWhenDecommissionStatus() throws IOException { assertTrue(m.find()); // no host details are shown - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -131,24 +136,22 @@ public void testIdOptionDecommissionStatusSuccess() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(0).getNodeID().getUuid()); cmd.execute(scmClient); // check status of host0 - Pattern p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + Pattern p = Pattern.compile("Datanode:\\s.*host0\\)"); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); - m = p.matcher(outContent.toString(DEFAULT_ENCODING)); - assertTrue(m.find()); - // as uuid of only host0 is passed, host1 should NOT be displayed - p = Pattern.compile("Datanode:\\s.*host1.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("UnderReplicated=.*UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); assertFalse(m.find()); } @@ -161,6 +164,7 @@ public void testIdOptionDecommissionStatusFail() throws IOException { .thenReturn(containerOnDecom); when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) .thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(2)); CommandLine c = new CommandLine(cmd); c.parseArgs("--id", nodes.get(1).getNodeID().getUuid()); @@ -172,10 +176,10 @@ public void testIdOptionDecommissionStatusFail() throws IOException { assertTrue(m.find()); // no host details are shown - p = 
Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -186,24 +190,22 @@ public void testIpOptionDecommissionStatusSuccess() throws IOException { when(scmClient.queryNode(any(), any(), any(), any())) .thenAnswer(invocation -> nodes); // 2 nodes decommissioning when(scmClient.getContainersOnDecomNode(any())).thenReturn(containerOnDecom); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(1)); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); cmd.execute(scmClient); // check status of host1 - Pattern p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + Pattern p = Pattern.compile("Datanode:\\s.*host1\\)"); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("host1.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); - m = p.matcher(outContent.toString(DEFAULT_ENCODING)); - assertTrue(m.find()); - // as IpAddress of only host1 is passed, host0 should NOT be displayed - p = Pattern.compile("Datanode:\\s.*host0.\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0.\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("host0.*[\r\n].*UnderReplicated.*UnClosed", Pattern.MULTILINE); + p = Pattern.compile("UnderReplicated=.*UnClosed="); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); assertFalse(m.find()); } @@ -216,6 +218,7 @@ public void testIpOptionDecommissionStatusFail() throws IOException { .thenReturn(containerOnDecom); when(scmClient.getContainersOnDecomNode(DatanodeDetails.getFromProtoBuf(nodes.get(1).getNodeID()))) .thenReturn(new HashMap<>()); + when(scmClient.getMetrics(any())).thenReturn(metrics.get(2)); CommandLine c = new CommandLine(cmd); c.parseArgs("--ip", nodes.get(1).getNodeID().getIpAddress()); @@ -226,11 +229,11 @@ public void testIpOptionDecommissionStatusFail() throws IOException { Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("Datanode:\\s.*host0\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host0\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); - p = Pattern.compile("Datanode:\\s.*host1\\)", Pattern.MULTILINE); + p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertFalse(m.find()); } @@ -275,4 +278,38 @@ private Map> getContainersOnDecomNodes() { return containerMap; } + private ArrayList getMetrics() { + ArrayList result = new ArrayList<>(); + // no nodes decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 0, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 0, " + + "\"ContainersUnderReplicatedTotal\" : 0, \"ContainersUnClosedTotal\" : 0, " + + "\"ContainersSufficientlyReplicatedTotal\" : 0 } ]}"); + // 2 nodes in decommisioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + 
"\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 2, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 2, " + + "\"ContainersUnderReplicatedTotal\" : 6, \"ContainersUnclosedTotal\" : 6, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\", \"tag.Hostname.1\" : \"host0\", " + + "\"PipelinesWaitingToCloseDN.1\" : 1, \"UnderReplicatedDN.1\" : 3, " + + "\"SufficientlyReplicatedDN.1\" : 0, \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 111211, " + + "\"tag.datanode.2\" : \"host1\", \"tag.Hostname.2\" : \"host1\", " + + "\"PipelinesWaitingToCloseDN.2\" : 1, \"UnderReplicatedDN.2\" : 3, " + + "\"SufficientlyReplicatedDN.2\" : 0, \"UnclosedContainersDN.2\" : 3, \"StartTimeDN.2\" : 221221} ]}"); + // only host 1 decommissioning + result.add("{ \"beans\" : [ { " + + "\"name\" : \"Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics\", " + + "\"modelerType\" : \"NodeDecommissionMetrics\", \"DecommissioningMaintenanceNodesTotal\" : 1, " + + "\"RecommissionNodesTotal\" : 0, \"PipelinesWaitingToCloseTotal\" : 1, " + + "\"ContainersUnderReplicatedTotal\" : 3, \"ContainersUnclosedTotal\" : 3, " + + "\"ContainersSufficientlyReplicatedTotal\" : 10, " + + "\"tag.datanode.1\" : \"host0\",\n \"tag.Hostname.1\" : \"host0\",\n " + + "\"PipelinesWaitingToCloseDN.1\" : 1,\n \"UnderReplicatedDN.1\" : 3,\n " + + "\"SufficientlyReplicatedDN.1\" : 0,\n \"UnclosedContainersDN.1\" : 3, \"StartTimeDN.1\" : 221221} ]}"); + return result; + } } From 44adf80324f85853b98ddbf2e2ce0a074f354f3c Mon Sep 17 00:00:00 2001 From: ashishkumar50 <117710273+ashishkumar50@users.noreply.github.com> Date: Wed, 14 Feb 2024 14:10:40 +0530 Subject: [PATCH 024/108] HDDS-10359. Recursively deleting volume with OBS bucket shows error despite success (#6217) --- .../hadoop/ozone/shell/TestOzoneShellHA.java | 92 ++++++++++--------- .../shell/volume/DeleteVolumeHandler.java | 6 ++ 2 files changed, 55 insertions(+), 43 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 9596e96026a4..51956accb1c4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -82,6 +82,9 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY; +import static org.apache.hadoop.ozone.om.helpers.BucketLayout.OBJECT_STORE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -97,6 +100,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -1836,9 +1841,10 @@ public void 
testVolumeListKeys() OMException exception = (OMException) execution.getCause(); assertEquals(VOLUME_NOT_FOUND, exception.getResult()); } - - @Test - public void testRecursiveVolumeDelete() + + @ParameterizedTest + @ValueSource(ints = {1, 5}) + public void testRecursiveVolumeDelete(int threadCount) throws Exception { String volume1 = "volume10"; String volume2 = "volume20"; @@ -1847,47 +1853,19 @@ public void testRecursiveVolumeDelete() // Create bucket bucket1 with layout FILE_SYSTEM_OPTIMIZED // Insert some keys into it generateKeys(OZONE_URI_DELIMITER + volume1, - "/bucketfso", + "/fsobucket1", BucketLayout.FILE_SYSTEM_OPTIMIZED.toString()); - // Create another volume volume2 with bucket and some keys into it. + // Create another volume volume2 with bucket and some keys into it. generateKeys(OZONE_URI_DELIMITER + volume2, "/bucket2", BucketLayout.FILE_SYSTEM_OPTIMIZED.toString()); - // Create OBS bucket in volume1 - String[] args = new String[] {"bucket", "create", "--layout", - BucketLayout.OBJECT_STORE.toString(), volume1 + "/bucketobs"}; - execute(ozoneShell, args); - out.reset(); - - // Insert few keys into OBS bucket - String keyName = OZONE_URI_DELIMITER + volume1 + "/bucketobs" + - OZONE_URI_DELIMITER + "key"; - for (int i = 0; i < 5; i++) { - args = new String[] { - "key", "put", "o3://" + omServiceId + keyName + i, - testFile.getPath()}; - execute(ozoneShell, args); - } - out.reset(); - - // Create Legacy bucket in volume1 - args = new String[] {"bucket", "create", "--layout", - BucketLayout.LEGACY.toString(), volume1 + "/bucketlegacy"}; - execute(ozoneShell, args); - out.reset(); - - // Insert few keys into legacy bucket - keyName = OZONE_URI_DELIMITER + volume1 + "/bucketlegacy" + - OZONE_URI_DELIMITER + "key"; - for (int i = 0; i < 5; i++) { - args = new String[] { - "key", "put", "o3://" + omServiceId + keyName + i, - testFile.getPath()}; - execute(ozoneShell, args); - } - out.reset(); + createBucketAndGenerateKeys(volume1, FILE_SYSTEM_OPTIMIZED, "fsobucket2"); + createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket1"); + createBucketAndGenerateKeys(volume1, OBJECT_STORE, "obsbucket2"); + createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket1"); + createBucketAndGenerateKeys(volume1, LEGACY, "legacybucket2"); // Try volume delete without recursive // It should fail as volume is not empty @@ -1902,22 +1880,50 @@ public void testRecursiveVolumeDelete() assertEquals(client.getObjectStore().getVolume(volume1) .getName(), volume1); - // Delete volume1(containing OBS, FSO and Legacy buckets) recursively - args = - new String[] {"volume", "delete", volume1, "-r", "--yes"}; + // Delete volume1(containing OBS, FSO and Legacy buckets) recursively with thread count + String[] args = new String[] {"volume", "delete", volume1, "-r", "--yes", "-t", String.valueOf(threadCount)}; execute(ozoneShell, args); out.reset(); + // volume1 should not exist + omExecution = assertThrows(OMException.class, + () -> client.getObjectStore().getVolume(volume1)); + assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); + // volume2 should still exist assertEquals(client.getObjectStore().getVolume(volume2) .getName(), volume2); - // volume1 should not exist + // Delete volume2 recursively + args = new String[] {"volume", "delete", volume2, "-r", "--yes"}; + execute(ozoneShell, args); + out.reset(); + + // volume2 should not exist omExecution = assertThrows(OMException.class, - () -> client.getObjectStore().getVolume(volume1)); + () -> client.getObjectStore().getVolume(volume2)); 
assertEquals(VOLUME_NOT_FOUND, omExecution.getResult()); } + private void createBucketAndGenerateKeys(String volume, BucketLayout layout, String bucketName) { + // Create bucket + String[] args = new String[] {"bucket", "create", volume + "/" + bucketName, + "--layout", layout.toString()}; + execute(ozoneShell, args); + out.reset(); + + // Insert keys + String keyName = OZONE_URI_DELIMITER + volume + "/" + bucketName + + OZONE_URI_DELIMITER + "key"; + for (int i = 0; i < 5; i++) { + args = new String[] { + "key", "put", "o3://" + omServiceId + keyName + i, + testFile.getPath()}; + execute(ozoneShell, args); + } + out.reset(); + } + @Test public void testLinkedAndNonLinkedBucketMetaData() throws Exception { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java index e380e98561b0..8cc80502386f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java @@ -121,6 +121,11 @@ private void deleteVolumeRecursive() totalBucketCount++; } doCleanBuckets(); + // Reset counters and bucket list + numberOfBucketsCleaned.set(0); + totalBucketCount = 0; + cleanedBucketCounter.set(0); + bucketIdList.clear(); } /** @@ -201,6 +206,7 @@ public void run() { if (!cleanOBSBucket(bucket)) { throw new RuntimeException("Failed to clean bucket"); } + break; default: throw new RuntimeException("Invalid bucket layout"); } From 38768527f3b40326bb4a7d8b1d4bcdc98ccb8ca0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Feb 2024 11:16:22 +0100 Subject: [PATCH 025/108] HDDS-10368. 
Bump guice to 6.0.0 (#6212) --- hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 1 + hadoop-ozone/dist/src/main/license/jar-report.txt | 1 + pom.xml | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 82ee0e4e180d..64f67fd4dfde 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -351,6 +351,7 @@ Apache License 2.0 io.prometheus:simpleclient_common io.prometheus:simpleclient_dropwizard joda-time:joda-time + jakarta.inject:jakarta.inject-api jakarta.validation:jakarta.validation-api javax.enterprise:cdi-api javax.inject:javax.inject diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index ee0797cf2765..58feec550034 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -108,6 +108,7 @@ share/ozone/lib/jakarta.activation.jar share/ozone/lib/jakarta.activation-api.jar share/ozone/lib/jakarta.annotation-api.jar share/ozone/lib/jakarta.inject.jar +share/ozone/lib/jakarta.inject-api.jar share/ozone/lib/jakarta.validation-api.jar share/ozone/lib/jakarta.ws.rs-api.jar share/ozone/lib/jakarta.xml.bind-api.jar diff --git a/pom.xml b/pom.xml index 7b06872475e9..bb94d1d81384 100644 --- a/pom.xml +++ b/pom.xml @@ -210,7 +210,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 0.19 2.2.0 32.0.0-jre - 5.1.0 + 6.0.0 2.9.0 1.0 From 78fc781829e871839fb9413d62a65a89b1dcec6b Mon Sep 17 00:00:00 2001 From: Sadanand Shenoy Date: Thu, 15 Feb 2024 16:13:10 +0530 Subject: [PATCH 026/108] HDDS-10369. Set Times API doesn't work with linked buckets. 
(#6220) --- .../AbstractRootedOzoneFileSystemTest.java | 35 +++++++++++++++++++ .../om/request/key/OMKeySetTimesRequest.java | 4 ++- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index 03167ce11f36..9d31ac30cbb7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -2461,4 +2461,39 @@ void testSetTimes() throws Exception { assertEquals(mtime, fileStatus.getModificationTime()); } + @Test + public void testSetTimesForLinkedBucketPath() throws Exception { + // Create a file + OzoneBucket sourceBucket = + TestDataUtil.createVolumeAndBucket(client, bucketLayout); + Path volumePath1 = + new Path(OZONE_URI_DELIMITER, sourceBucket.getVolumeName()); + Path sourceBucketPath = new Path(volumePath1, sourceBucket.getName()); + Path path = new Path(sourceBucketPath, "key1"); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + OzoneVolume sourceVol = client.getObjectStore().getVolume(sourceBucket.getVolumeName()); + String linkBucketName = UUID.randomUUID().toString(); + createLinkBucket(sourceVol.getName(), linkBucketName, + sourceVol.getName(), sourceBucket.getName()); + + Path linkedBucketPath = new Path(volumePath1, linkBucketName); + Path keyInLinkedBucket = new Path(linkedBucketPath, "key1"); + + // test setTimes in linked bucket path + long mtime = 1000; + fs.setTimes(keyInLinkedBucket, mtime, 2000); + + FileStatus fileStatus = fs.getFileStatus(path); + // verify that mtime is updated as expected. + assertEquals(mtime, fileStatus.getModificationTime()); + + long mtimeDontUpdate = -1; + fs.setTimes(keyInLinkedBucket, mtimeDontUpdate, 2000); + + fileStatus = fs.getFileStatus(keyInLinkedBucket); + // verify that mtime is NOT updated as expected. + assertEquals(mtime, fileStatus.getModificationTime()); + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 1f5e623da0d2..e14cfaaad281 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -74,10 +74,12 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .setKeyName(normalizedKeyPath) .build(); + OzoneManagerProtocolProtos.KeyArgs newKeyArgs = resolveBucketLink(ozoneManager, keyArgs); + return request.toBuilder() .setSetTimesRequest( setTimesRequest.toBuilder() - .setKeyArgs(keyArgs) + .setKeyArgs(newKeyArgs) .setMtime(getModificationTime())) .build(); } From 6194d422f30960a6b032b7c80df535c0a7727f21 Mon Sep 17 00:00:00 2001 From: Smita <112169209+smitajoshi12@users.noreply.github.com> Date: Thu, 15 Feb 2024 20:11:53 +0530 Subject: [PATCH 027/108] HDDS-10301. Recon - Fold the pipeline info for a DN on Datanode page. 
(#6198) --- .../webapps/recon/ozone-recon-web/api/db.json | 36 ++++++++++++++++ .../src/views/datanodes/datanodes.tsx | 41 +++++++++++++------ 2 files changed, 64 insertions(+), 13 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index 204609f66fec..e4ed0ac048e9 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -44,6 +44,42 @@ "replicationType": "RATIS", "replicationFactor": 1, "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc710", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc711", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc712", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc713", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc714", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost1.storage.enterprise.com" + }, + { + "pipelineID": "09d3a478-ff01-4ce6-ad75-f3ec79bcc715", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost1.storage.enterprise.com" } ], "containers": 80, diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx index 19f306ae4f6b..c42bd8c1f91b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx @@ -17,7 +17,7 @@ */ import React from 'react'; -import {Table, Icon, Tooltip} from 'antd'; +import {Table, Icon, Tooltip, Popover} from 'antd'; import {PaginationConfig} from 'antd/lib/pagination'; import moment from 'moment'; import {ReplicationIcon} from 'utils/themeIcons'; @@ -192,21 +192,36 @@ const COLUMNS = [ key: 'pipelines', isVisible: true, render: (pipelines: IPipeline[], record: IDatanode) => { + let firstThreePipelinesIDs = []; + let remainingPipelinesIDs: any[] = []; + firstThreePipelinesIDs = pipelines && pipelines.filter((element, index) => index < 3); + remainingPipelinesIDs = pipelines && pipelines.slice(3, pipelines.length); + + const RenderPipelineIds = ({ pipelinesIds }) => { + return pipelinesIds && pipelinesIds.map((pipeline: any, index: any) => ( +

+ + {pipeline.pipelineID} +
+ )) + } + return ( -
+ <> { - pipelines && pipelines.map((pipeline, index) => ( -
- - {pipeline.pipelineID} -
- )) + } -
+ { + remainingPipelinesIDs.length > 0 && + } title="Remaining pipelines" placement="rightTop" trigger="hover"> + {`... and ${remainingPipelinesIDs.length} more pipelines`} + + } + ); } }, From 2348784bec6baa118057e78a9716c6a32819a9a9 Mon Sep 17 00:00:00 2001 From: Duong Nguyen Date: Thu, 15 Feb 2024 09:14:38 -0800 Subject: [PATCH 028/108] HDDS-10288. Checksum to support direct buffers (#6162) --- .../ozone/common/ChecksumByteBufferImpl.java | 29 +++++++++++++++++++ .../ozone/common/TestChecksumByteBuffer.java | 19 ++++++++++++ 2 files changed, 48 insertions(+) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java index 5fab7eacdf6d..1d596bf70077 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java @@ -17,9 +17,13 @@ */ package org.apache.hadoop.ozone.common; +import org.apache.hadoop.hdds.JavaUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.lang.reflect.Field; import java.nio.ByteBuffer; import java.util.zip.Checksum; @@ -35,6 +39,8 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { private final Checksum checksum; private static final Field IS_READY_ONLY_FIELD; + // To access Checksum.update(ByteBuffer) API from Java 9+. + private static final MethodHandle BYTE_BUFFER_UPDATE; static { Field f = null; @@ -46,6 +52,18 @@ public class ChecksumByteBufferImpl implements ChecksumByteBuffer { LOG.error("No isReadOnly field in ByteBuffer", e); } IS_READY_ONLY_FIELD = f; + + MethodHandle byteBufferUpdate = null; + if (JavaUtils.isJavaVersionAtLeast(9)) { + try { + byteBufferUpdate = MethodHandles.publicLookup().findVirtual(Checksum.class, "update", + MethodType.methodType(void.class, ByteBuffer.class)); + } catch (Throwable t) { + throw new IllegalStateException("Failed to lookup Checksum.update(ByteBuffer)."); + } + } + BYTE_BUFFER_UPDATE = byteBufferUpdate; + } public ChecksumByteBufferImpl(Checksum impl) { @@ -57,6 +75,17 @@ public ChecksumByteBufferImpl(Checksum impl) { // should be refactored to simply call checksum.update(buffer), as the // Checksum interface has been enhanced to allow this since Java 9. public void update(ByteBuffer buffer) { + // Prefer JDK9+ implementation that allows ByteBuffer. This allows DirectByteBuffer to be checksum directly in + // native memory. + if (BYTE_BUFFER_UPDATE != null) { + try { + BYTE_BUFFER_UPDATE.invokeExact(checksum, buffer); + return; + } catch (Throwable e) { + throw new IllegalStateException("Error invoking " + BYTE_BUFFER_UPDATE, e); + } + } + // this is a hack to not do memory copy. 
if (IS_READY_ONLY_FIELD != null) { try { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java index 9567fa2c281e..0d30d43dc01f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java @@ -19,8 +19,10 @@ import org.apache.hadoop.util.PureJavaCrc32; import org.apache.hadoop.util.PureJavaCrc32C; +import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import org.apache.commons.lang3.RandomUtils; import java.util.zip.Checksum; @@ -45,6 +47,23 @@ public void testPureJavaCrc32CByteBuffer() { new VerifyChecksumByteBuffer(expected, testee).testCorrectness(); } + @Test + public void testWithDirectBuffer() { + final ChecksumByteBuffer checksum = ChecksumByteBufferFactory.crc32CImpl(); + byte[] value = "test".getBytes(StandardCharsets.UTF_8); + checksum.reset(); + checksum.update(value, 0, value.length); + long checksum1 = checksum.getValue(); + + ByteBuffer byteBuffer = ByteBuffer.allocateDirect(value.length); + byteBuffer.put(value).rewind(); + checksum.reset(); + checksum.update(byteBuffer); + long checksum2 = checksum.getValue(); + + Assertions.assertEquals(checksum1, checksum2); + } + static class VerifyChecksumByteBuffer { private final Checksum expected; private final ChecksumByteBuffer testee; From cce2f969a85323441c476aaeaf27d45b081b0c2f Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Thu, 15 Feb 2024 15:57:52 -0800 Subject: [PATCH 029/108] HDDS-10339. Add S3 API level dashboard (#6202) Co-authored-by: Ritesh H Shukla --- .../dashboards/Ozone - S3 Dashboard.json | 1209 +++++++++++++++++ 1 file changed, 1209 insertions(+) create mode 100644 hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - S3 Dashboard.json diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - S3 Dashboard.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - S3 Dashboard.json new file mode 100644 index 000000000000..9518995d70a5 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - S3 Dashboard.json @@ -0,0 +1,1209 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 7, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 9, + "panels": [], + "title": "S3 Latency measurements", + "type": "row" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + 
}, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "label_replace({__name__=~\"s3_gateway_metrics_${S3API}_success_latency_ns_avg_time\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_success_latency_ns_avg_time\")", + "instant": false, + "legendFormat": "{{api}} {{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "API avg latency - Success", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "label_replace({__name__=~\"s3_gateway_metrics_${S3API}_failure_latency_ns_avg_time\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_failure_latency_ns_avg_time\")", + "instant": false, + "legendFormat": "{{api}} {{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "API avg latency - Failure", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": 
"green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "label_replace({__name__=~\"s3_gateway_metrics_${S3API}_success_latency_ns60s${percentile}_percentile_time\"}, \"api\", \"$1 $2\", \"__name__\", \"s3_gateway_metrics_(.*)_success_latency_ns60s(.*)_percentile_time\")", + "instant": false, + "legendFormat": "{{api}} {{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "Combined Percentile - Success", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 27, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "label_replace({__name__=~\"s3_gateway_metrics_${S3API}_failure_latency_ns60s${percentile}_percentile_time\"}, \"api\", \"$1 $2\", \"__name__\", \"s3_gateway_metrics_(.*)_failure_latency_ns60s(.*)_percentile_time\")", + "instant": false, + "legendFormat": "{{api}} {{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "Combined Percentile - Failure", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns", + "unitScale": true + }, + "overrides": [] + }, + 
"gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "label_replace({__name__=~\"(s3_gateway_metrics_)${S3API}_success_latency_.*_max_time\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_success_latency_.*_max_time\")", + "hide": false, + "instant": false, + "legendFormat": "{{api}}, {{hostname}}", + "range": true, + "refId": "B" + } + ], + "title": "Max latency - Success", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ns", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "label_replace({__name__=~\"(s3_gateway_metrics_)${S3API}_failure_latency_.*_max_time\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_failure_latency_.*_max_time\")", + "hide": false, + "instant": false, + "legendFormat": "{{api}}, {{hostname}}", + "range": true, + "refId": "B" + } + ], + "title": "Max latency - Failure", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 8, + "panels": [], + "title": "Rate of S3 operations", + "type": "row" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + 
"y": 26 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": true, + "expr": "rate(label_replace({__name__=~\"(s3_gateway_metrics_)${S3API}_success\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_success\")[5m:30s])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{api}}, {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "APIs per second - Success", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "rate(label_replace({__name__=~\"(s3_gateway_metrics_)${S3API}_failure\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_failure\")[5m:30s])", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{api}} {{hostname}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "APIs per second - Failure", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 34 + }, + "id": 6, + 
"options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "rate(label_replace({__name__=~\"(s3_gateway_metrics_)${S3API}_success_length\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_success_length\")[3m:30s])", + "hide": false, + "instant": false, + "legendFormat": "{{api}}, {{hostname}}", + "range": true, + "refId": "A" + } + ], + "title": "Data transfer per second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes", + "unitScale": true + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 34 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus" + }, + "editorMode": "code", + "expr": "rate(sum(label_replace({__name__=~\"(s3_gateway_metrics_)${S3API}_success_length\"}, \"api\", \"$1\", \"__name__\", \"s3_gateway_metrics_(.+)_success_length\"))[3m:30s])", + "hide": false, + "instant": false, + "legendFormat": "Total Data transfer across all S3 Gateways", + "range": true, + "refId": "A" + } + ], + "title": "Aggregate Data transfer per second", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "hide": 0, + "includeAll": true, + "label": "S3API", + "multi": true, + "name": "S3API", + "options": [ + { + "selected": true, + "text": "All", + "value": "$__all" + }, + { + "selected": false, + "text": "abort_multipart_upload", + "value": "abort_multipart_upload" + }, + { + "selected": false, + "text": "complete_multipart_upload", + "value": "complete_multipart_upload" + }, + { + "selected": false, + "text": "copy_key_metadata", + "value": "copy_key_metadata" + }, + { + "selected": false, + "text": "copy_object", + "value": "copy_object" + }, + { + "selected": false, + "text": "create_bucket", + "value": "create_bucket" + }, + { + "selected": false, + "text": "create_key", + "value": "create_key" + }, + { + "selected": false, + "text": "create_multipart_key", + "value": "create_multipart_key" + }, + { + "selected": false, + "text": "delete_bucket", + "value": "delete_bucket" + }, + { + "selected": false, + "text": "delete_key", + "value": "delete_key" + }, + { + "selected": false, + "text": "get_acl", + 
"value": "get_acl" + }, + { + "selected": false, + "text": "get_bucket", + "value": "get_bucket" + }, + { + "selected": false, + "text": "get_key", + "value": "get_key" + }, + { + "selected": false, + "text": "head_bucket", + "value": "head_bucket" + }, + { + "selected": false, + "text": "head_key", + "value": "head_key" + }, + { + "selected": false, + "text": "init_multipart_uploads", + "value": "init_multipart_uploads" + }, + { + "selected": false, + "text": "list_multipart_uploads", + "value": "list_multipart_uploads" + }, + { + "selected": false, + "text": "list_parts", + "value": "list_parts" + }, + { + "selected": false, + "text": "list_s3_buckets", + "value": "list_s3_buckets" + }, + { + "selected": false, + "text": "put_acl", + "value": "put_acl" + }, + { + "selected": false, + "text": "put_key", + "value": "put_key" + } + ], + "query": "abort_multipart_upload, complete_multipart_upload, copy_key_metadata, copy_object, create_bucket, create_key, create_multipart_key, delete_bucket, delete_key, get_acl, get_bucket, get_key, head_bucket, head_key, init_multipart_uploads, list_multipart_uploads, list_parts, list_s3_buckets, put_acl, put_key ", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + }, + { + "current": { + "selected": false, + "text": [ + "99th" + ], + "value": [ + "99th" + ] + }, + "hide": 0, + "includeAll": true, + "label": "Latency Percentile", + "multi": true, + "name": "percentile", + "options": [ + { + "selected": false, + "text": "All", + "value": "$__all" + }, + { + "selected": true, + "text": "99th", + "value": "99th" + }, + { + "selected": false, + "text": "95th", + "value": "95th" + }, + { + "selected": false, + "text": "90th", + "value": "90th" + }, + { + "selected": false, + "text": "75th", + "value": "75th" + }, + { + "selected": false, + "text": "50th", + "value": "50th" + } + ], + "query": "99th, 95th, 90th, 75th, 50th", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "S3 API Dashboard", + "version": 16, + "weekStart": "" +} \ No newline at end of file From e0f3ae14062742c33ec1a2703cd447665ca2a956 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 18 Feb 2024 08:24:23 +0100 Subject: [PATCH 030/108] HDDS-10391. Bump joda-time to 2.12.7 (#6230) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index bb94d1d81384..b6ee69a3300c 100644 --- a/pom.xml +++ b/pom.xml @@ -205,7 +205,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.2.2 2.6.1 2.1.1 - 2.12.5 + 2.12.7 0.19 2.2.0 From c6724537d7413d5244bd3843fe7170d954d5d77e Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Mon, 19 Feb 2024 21:59:47 -0800 Subject: [PATCH 031/108] HDDS-10387. 
Fix parameter number warning in KeyOutputStream and related classes (#6225) --- .../client/io/BlockOutputStreamEntry.java | 62 +++++---- .../client/io/BlockOutputStreamEntryPool.java | 65 +++------ .../client/io/ECBlockOutputStreamEntry.java | 101 +------------- .../io/ECBlockOutputStreamEntryPool.java | 49 +------ .../ozone/client/io/ECKeyOutputStream.java | 125 +++++------------- .../ozone/client/io/KeyOutputStream.java | 72 +++------- .../io/TestECBlockOutputStreamEntry.java | 16 +-- .../ozone/client/OzoneOutputStreamStub.java | 7 +- 8 files changed, 117 insertions(+), 380 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index 9bdec27f534f..c0221d07a55e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -37,6 +37,7 @@ import org.apache.hadoop.security.token.Token; import com.google.common.annotations.VisibleForTesting; +import org.apache.ratis.util.JavaUtils; /** * A BlockOutputStreamEntry manages the data writes into the DataNodes. @@ -60,33 +61,28 @@ public class BlockOutputStreamEntry extends OutputStream { private long currentPosition; private final Token token; - private BufferPool bufferPool; - private ContainerClientMetrics clientMetrics; - private StreamBufferArgs streamBufferArgs; - - @SuppressWarnings({"parameternumber", "squid:S00107"}) - BlockOutputStreamEntry( - BlockID blockID, String key, - XceiverClientFactory xceiverClientManager, - Pipeline pipeline, - long length, - BufferPool bufferPool, - Token token, - OzoneClientConfig config, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs - ) { - this.config = config; + private final BufferPool bufferPool; + private final ContainerClientMetrics clientMetrics; + private final StreamBufferArgs streamBufferArgs; + + BlockOutputStreamEntry(Builder b) { + this.config = b.config; this.outputStream = null; - this.blockID = blockID; - this.key = key; - this.xceiverClientManager = xceiverClientManager; - this.pipeline = pipeline; - this.token = token; - this.length = length; + this.blockID = b.blockID; + this.key = b.key; + this.xceiverClientManager = b.xceiverClientManager; + this.pipeline = b.pipeline; + this.token = b.token; + this.length = b.length; this.currentPosition = 0; - this.bufferPool = bufferPool; - this.clientMetrics = clientMetrics; - this.streamBufferArgs = streamBufferArgs; + this.bufferPool = b.bufferPool; + this.clientMetrics = b.clientMetrics; + this.streamBufferArgs = b.streamBufferArgs; + } + + @Override + public String toString() { + return JavaUtils.getClassSimpleName(getClass()) + ":" + key + " " + blockID; } /** @@ -362,6 +358,14 @@ public static class Builder { private ContainerClientMetrics clientMetrics; private StreamBufferArgs streamBufferArgs; + public Pipeline getPipeline() { + return pipeline; + } + + public long getLength() { + return length; + } + public Builder setBlockID(BlockID bID) { this.blockID = bID; return this; @@ -412,13 +416,7 @@ public Builder setStreamBufferArgs(StreamBufferArgs streamBufferArgs) { } public BlockOutputStreamEntry build() { - return new BlockOutputStreamEntry(blockID, - key, - xceiverClientManager, - pipeline, - length, - bufferPool, - token, config, clientMetrics, streamBufferArgs); + return new 
BlockOutputStreamEntry(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index d0f3b5728a8b..4d6026f92590 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -26,7 +26,6 @@ import java.util.ListIterator; import java.util.Map; -import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.scm.ContainerClientMetrics; import org.apache.hadoop.hdds.scm.OzoneClientConfig; @@ -62,7 +61,7 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { /** * List of stream entries that are used to write a block of data. */ - private final List streamEntries; + private final List streamEntries = new ArrayList<>(); private final OzoneClientConfig config; /** * The actual stream entry we are writing into. Note that a stream entry is @@ -73,7 +72,6 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final OzoneManagerProtocol omClient; private final OmKeyArgs keyArgs; private final XceiverClientFactory xceiverClientFactory; - private final String requestID; /** * A {@link BufferPool} shared between all * {@link org.apache.hadoop.hdds.scm.storage.BlockOutputStream}s managed by @@ -86,39 +84,31 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final ContainerClientMetrics clientMetrics; private final StreamBufferArgs streamBufferArgs; - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public BlockOutputStreamEntryPool( - OzoneClientConfig config, - OzoneManagerProtocol omClient, - String requestId, ReplicationConfig replicationConfig, - String uploadID, int partNumber, - boolean isMultipart, OmKeyInfo info, - boolean unsafeByteBufferConversion, - XceiverClientFactory xceiverClientFactory, long openID, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs - ) { - this.config = config; - this.xceiverClientFactory = xceiverClientFactory; - streamEntries = new ArrayList<>(); + public BlockOutputStreamEntryPool(KeyOutputStream.Builder b) { + this.config = b.getClientConfig(); + this.xceiverClientFactory = b.getXceiverManager(); currentStreamIndex = 0; - this.omClient = omClient; + this.omClient = b.getOmClient(); + final OmKeyInfo info = b.getOpenHandler().getKeyInfo(); this.keyArgs = new OmKeyArgs.Builder().setVolumeName(info.getVolumeName()) .setBucketName(info.getBucketName()).setKeyName(info.getKeyName()) - .setReplicationConfig(replicationConfig).setDataSize(info.getDataSize()) - .setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID) - .setMultipartUploadPartNumber(partNumber).build(); - this.requestID = requestId; - this.openID = openID; + .setReplicationConfig(b.getReplicationConfig()) + .setDataSize(info.getDataSize()) + .setIsMultipartKey(b.isMultipartKey()) + .setMultipartUploadID(b.getMultipartUploadID()) + .setMultipartUploadPartNumber(b.getMultipartNumber()) + .build(); + this.openID = b.getOpenHandler().getId(); this.excludeList = createExcludeList(); + this.streamBufferArgs = b.getStreamBufferArgs(); this.bufferPool = new BufferPool(streamBufferArgs.getStreamBufferSize(), (int) (streamBufferArgs.getStreamBufferMaxSize() / streamBufferArgs .getStreamBufferSize()), 
ByteStringConversion - .createByteBufferConversion(unsafeByteBufferConversion)); - this.clientMetrics = clientMetrics; - this.streamBufferArgs = streamBufferArgs; + .createByteBufferConversion(b.isUnsafeByteBufferConversionEnabled())); + this.clientMetrics = b.getClientMetrics(); } ExcludeList createExcludeList() { @@ -126,25 +116,6 @@ ExcludeList createExcludeList() { Clock.system(ZoneOffset.UTC)); } - BlockOutputStreamEntryPool(ContainerClientMetrics clientMetrics, - OzoneClientConfig clientConfig, StreamBufferArgs streamBufferArgs) { - streamEntries = new ArrayList<>(); - omClient = null; - keyArgs = null; - xceiverClientFactory = null; - config = clientConfig; - streamBufferArgs.setStreamBufferFlushDelay(false); - requestID = null; - int chunkSize = 0; - bufferPool = new BufferPool(chunkSize, 1); - - currentStreamIndex = 0; - openID = -1; - excludeList = createExcludeList(); - this.clientMetrics = clientMetrics; - this.streamBufferArgs = null; - } - /** * When a key is opened, it is possible that there are some blocks already * allocated to it for this open session. In this case, to make use of these @@ -156,10 +127,8 @@ ExcludeList createExcludeList() { * * @param version the set of blocks that are pre-allocated. * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { + public void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) { // server may return any number of blocks, (0 to any) // only the blocks allocated in this open session (block createVersion // equals to open session version) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java index 07d0f46069ca..7f6ce87d60c5 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java @@ -23,17 +23,10 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.ContainerClientMetrics; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; -import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.security.token.Token; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,19 +68,10 @@ public class ECBlockOutputStreamEntry extends BlockOutputStreamEntry { private int currentStreamIdx = 0; private long successfulBlkGrpAckedLen; - @SuppressWarnings({"parameternumber", "squid:S00107"}) - ECBlockOutputStreamEntry(BlockID blockID, String key, - XceiverClientFactory xceiverClientManager, Pipeline pipeline, long length, - BufferPool bufferPool, Token token, - OzoneClientConfig config, ContainerClientMetrics 
clientMetrics, - StreamBufferArgs streamBufferArgs) { - super(blockID, key, xceiverClientManager, pipeline, length, bufferPool, - token, config, clientMetrics, streamBufferArgs); - assertInstanceOf( - pipeline.getReplicationConfig(), ECReplicationConfig.class); - this.replicationConfig = - (ECReplicationConfig) pipeline.getReplicationConfig(); - this.length = replicationConfig.getData() * length; + ECBlockOutputStreamEntry(Builder b) { + super(b); + this.replicationConfig = assertInstanceOf(b.getPipeline().getReplicationConfig(), ECReplicationConfig.class); + this.length = replicationConfig.getData() * b.getLength(); } @Override @@ -433,82 +417,9 @@ public ByteString calculateChecksum() throws IOException { /** * Builder class for ChunkGroupOutputStreamEntry. * */ - public static class Builder { - private BlockID blockID; - private String key; - private XceiverClientFactory xceiverClientManager; - private Pipeline pipeline; - private long length; - private BufferPool bufferPool; - private Token token; - private OzoneClientConfig config; - private ContainerClientMetrics clientMetrics; - private StreamBufferArgs streamBufferArgs; - - public ECBlockOutputStreamEntry.Builder setBlockID(BlockID bID) { - this.blockID = bID; - return this; - } - - public ECBlockOutputStreamEntry.Builder setKey(String keys) { - this.key = keys; - return this; - } - - public ECBlockOutputStreamEntry.Builder setXceiverClientManager( - XceiverClientFactory - xClientManager) { - this.xceiverClientManager = xClientManager; - return this; - } - - public ECBlockOutputStreamEntry.Builder setPipeline(Pipeline ppln) { - this.pipeline = ppln; - return this; - } - - public ECBlockOutputStreamEntry.Builder setLength(long len) { - this.length = len; - return this; - } - - public ECBlockOutputStreamEntry.Builder setBufferPool(BufferPool pool) { - this.bufferPool = pool; - return this; - } - - public ECBlockOutputStreamEntry.Builder setConfig( - OzoneClientConfig clientConfig) { - this.config = clientConfig; - return this; - } - - public ECBlockOutputStreamEntry.Builder setToken( - Token bToken) { - this.token = bToken; - return this; - } - - public ECBlockOutputStreamEntry.Builder setClientMetrics( - ContainerClientMetrics containerClientMetrics) { - this.clientMetrics = containerClientMetrics; - return this; - } - - public ECBlockOutputStreamEntry.Builder setStreamBufferArgs( - StreamBufferArgs args) { - this.streamBufferArgs = args; - return this; - } - + public static class Builder extends BlockOutputStreamEntry.Builder { public ECBlockOutputStreamEntry build() { - return new ECBlockOutputStreamEntry(blockID, - key, - xceiverClientManager, - pipeline, - length, - bufferPool, - token, config, clientMetrics, streamBufferArgs); + return new ECBlockOutputStreamEntry(this); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java index e551605d842d..e278097a495a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java @@ -17,19 +17,7 @@ */ package org.apache.hadoop.ozone.client.io; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.hadoop.hdds.scm.ContainerClientMetrics; -import 
org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; - -import java.time.Clock; -import java.time.ZoneOffset; /** * {@link BlockOutputStreamEntryPool} is responsible to manage OM communication @@ -44,37 +32,14 @@ * @see ECBlockOutputStreamEntry */ public class ECBlockOutputStreamEntryPool extends BlockOutputStreamEntryPool { - - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public ECBlockOutputStreamEntryPool(OzoneClientConfig config, - OzoneManagerProtocol omClient, - String requestId, - ReplicationConfig replicationConfig, - String uploadID, - int partNumber, - boolean isMultipart, - OmKeyInfo info, - boolean unsafeByteBufferConversion, - XceiverClientFactory xceiverClientFactory, - long openID, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs) { - super(config, omClient, requestId, replicationConfig, uploadID, partNumber, - isMultipart, info, unsafeByteBufferConversion, xceiverClientFactory, - openID, clientMetrics, streamBufferArgs); - assert replicationConfig instanceof ECReplicationConfig; - } - - @Override - ExcludeList createExcludeList() { - return new ExcludeList(getConfig().getExcludeNodesExpiryTime(), - Clock.system(ZoneOffset.UTC)); + public ECBlockOutputStreamEntryPool(ECKeyOutputStream.Builder builder) { + super(builder); } @Override - BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { - return - new ECBlockOutputStreamEntry.Builder() - .setBlockID(subKeyInfo.getBlockID()) + ECBlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setBlockID(subKeyInfo.getBlockID()) .setKey(getKeyName()) .setXceiverClientManager(getXceiverClientFactory()) .setPipeline(subKeyInfo.getPipeline()) @@ -83,8 +48,8 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setBufferPool(getBufferPool()) .setToken(subKeyInfo.getToken()) .setClientMetrics(getClientMetrics()) - .setStreamBufferArgs(getStreamBufferArgs()) - .build(); + .setStreamBufferArgs(getStreamBufferArgs()); + return b.build(); } @Override diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java index b5c36474ff9e..878558073f75 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java @@ -17,12 +17,28 @@ */ package org.apache.hadoop.ozone.client.io; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.hadoop.fs.FSExceptionMessages; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; +import org.apache.hadoop.io.ByteBufferPool; +import 
org.apache.hadoop.ozone.om.protocol.S3Auth; +import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder; +import org.apache.ozone.erasurecode.rawcoder.util.CodecUtil; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.nio.Buffer; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; @@ -35,30 +51,6 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; -import org.apache.hadoop.fs.FSExceptionMessages; -import org.apache.hadoop.hdds.client.ECReplicationConfig; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.XceiverClientFactory; -import org.apache.hadoop.hdds.scm.client.HddsClientUtils; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.io.ByteBufferPool; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.ozone.om.protocol.S3Auth; -import org.apache.ozone.erasurecode.rawcoder.RawErasureEncoder; -import org.apache.ozone.erasurecode.rawcoder.util.CodecUtil; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - /** * ECKeyOutputStream handles the EC writes by writing the data into underlying * block output streams chunk by chunk. @@ -100,22 +92,6 @@ private enum StripeWriteStatus { private long offset; // how much data has been ingested into the stream private long writeOffset; - private final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool; - - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); - } - - @VisibleForTesting - public XceiverClientFactory getXceiverClientFactory() { - return blockOutputStreamEntryPool.getXceiverClientFactory(); - } - - @VisibleForTesting - public List getLocationInfoList() { - return blockOutputStreamEntryPool.getLocationInfoList(); - } @VisibleForTesting public void insertFlushCheckpoint(long version) throws IOException { @@ -128,8 +104,7 @@ public long getFlushCheckpoint() { } private ECKeyOutputStream(Builder builder) { - super(builder.getReplicationConfig(), builder.getClientMetrics(), - builder.getClientConfig(), builder.getStreamBufferArgs()); + super(builder.getReplicationConfig(), new ECBlockOutputStreamEntryPool(builder)); this.config = builder.getClientConfig(); this.bufferPool = builder.getByteBufferPool(); // For EC, cell/chunk size and buffer size can be same for now. 
@@ -140,16 +115,6 @@ private ECKeyOutputStream(Builder builder) { ecChunkSize, numDataBlks, numParityBlks, bufferPool); chunkIndex = 0; ecStripeQueue = new ArrayBlockingQueue<>(config.getEcStripeQueueSize()); - OmKeyInfo info = builder.getOpenHandler().getKeyInfo(); - blockOutputStreamEntryPool = - new ECBlockOutputStreamEntryPool(config, - builder.getOmClient(), builder.getRequestID(), - builder.getReplicationConfig(), - builder.getMultipartUploadID(), builder.getMultipartNumber(), - builder.isMultipartKey(), - info, builder.isUnsafeByteBufferConversionEnabled(), - builder.getXceiverManager(), builder.getOpenHandler().getId(), - builder.getClientMetrics(), builder.getStreamBufferArgs()); this.writeOffset = 0; this.encoder = CodecUtil.createRawEncoderWithFallback( @@ -164,22 +129,9 @@ private ECKeyOutputStream(Builder builder) { this.atomicKeyCreation = builder.getAtomicKeyCreation(); } - /** - * When a key is opened, it is possible that there are some blocks already - * allocated to it for this open session. In this case, to make use of these - * blocks, we need to add these blocks to stream entries. But, a key's version - * also includes blocks from previous versions, we need to avoid adding these - * old blocks to stream entries, because these old blocks should not be picked - * for write. To do this, the following method checks that, only those - * blocks created in this particular open version are added to stream entries. - * - * @param version the set of blocks that are pre-allocated. - * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException - */ - public void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { - blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); + @Override + protected ECBlockOutputStreamEntryPool getBlockOutputStreamEntryPool() { + return (ECBlockOutputStreamEntryPool) super.getBlockOutputStreamEntryPool(); } /** @@ -218,6 +170,7 @@ private void rollbackAndReset(ECChunkBuffers stripe) throws IOException { final ByteBuffer[] dataBuffers = stripe.getDataBuffers(); offset -= Arrays.stream(dataBuffers).mapToInt(Buffer::limit).sum(); + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); final ECBlockOutputStreamEntry failedStreamEntry = blockOutputStreamEntryPool.getCurrentStreamEntry(); failedStreamEntry.resetToFirstEntry(); @@ -256,8 +209,7 @@ private void logStreamError(List failedStreams, private StripeWriteStatus commitStripeWrite(ECChunkBuffers stripe) throws IOException { - ECBlockOutputStreamEntry streamEntry = - blockOutputStreamEntryPool.getCurrentStreamEntry(); + final ECBlockOutputStreamEntry streamEntry = getBlockOutputStreamEntryPool().getCurrentStreamEntry(); List failedStreams = streamEntry.streamsWithWriteFailure(); if (!failedStreams.isEmpty()) { @@ -297,6 +249,7 @@ private void excludePipelineAndFailedDN(Pipeline pipeline, List failedStreams) { // Exclude the failed pipeline + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool.getExcludeList().addPipeline(pipeline.getId()); // If the failure is NOT caused by other reasons (e.g. 
container full), @@ -362,6 +315,7 @@ private void generateParityCells() throws IOException { } private void writeDataCells(ECChunkBuffers stripe) throws IOException { + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool.allocateBlockIfNeeded(); ByteBuffer[] dataCells = stripe.getDataBuffers(); for (int i = 0; i < numDataBlks; i++) { @@ -374,6 +328,7 @@ private void writeDataCells(ECChunkBuffers stripe) throws IOException { private void writeParityCells(ECChunkBuffers stripe) { // Move the stream entry cursor to parity block index + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); blockOutputStreamEntryPool .getCurrentStreamEntry().forceToFirstParityBlock(); ByteBuffer[] parityCells = stripe.getParityBuffers(); @@ -413,7 +368,7 @@ private void handleOutputStreamWrite(ByteBuffer buffer, boolean isParity) { // The len cannot be bigger than cell buffer size. assert buffer.limit() <= ecChunkSize : "The buffer size: " + buffer.limit() + " should not exceed EC chunk size: " + ecChunkSize; - writeToOutputStream(blockOutputStreamEntryPool.getCurrentStreamEntry(), + writeToOutputStream(getBlockOutputStreamEntryPool().getCurrentStreamEntry(), buffer.array(), buffer.limit(), 0, isParity); } catch (Exception e) { markStreamAsFailed(e); @@ -449,8 +404,7 @@ private void handleException(BlockOutputStreamEntry streamEntry, Preconditions.checkNotNull(t); boolean containerExclusionException = checkIfContainerToExclude(t); if (containerExclusionException) { - blockOutputStreamEntryPool.getExcludeList() - .addPipeline(streamEntry.getPipeline().getId()); + getBlockOutputStreamEntryPool().getExcludeList().addPipeline(streamEntry.getPipeline().getId()); } markStreamAsFailed(exception); } @@ -460,7 +414,7 @@ private void markStreamClosed() { } private void markStreamAsFailed(Exception e) { - blockOutputStreamEntryPool.getCurrentStreamEntry().markFailed(e); + getBlockOutputStreamEntryPool().getCurrentStreamEntry().markFailed(e); } @Override @@ -470,6 +424,7 @@ public void flush() { private void closeCurrentStreamEntry() throws IOException { + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); if (!blockOutputStreamEntryPool.isEmpty()) { while (true) { try { @@ -503,6 +458,7 @@ public void close() throws IOException { return; } closed = true; + final ECBlockOutputStreamEntryPool blockOutputStreamEntryPool = getBlockOutputStreamEntryPool(); try { if (!closing) { // If stripe buffer is not empty, encode and flush the stripe. @@ -614,20 +570,6 @@ public static void padBufferToLimit(ByteBuffer buf, int limit) { buf.position(limit); } - public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { - return blockOutputStreamEntryPool.getCommitUploadPartInfo(); - } - - @VisibleForTesting - public ExcludeList getExcludeList() { - return blockOutputStreamEntryPool.getExcludeList(); - } - - @Override - public Map getMetadata() { - return this.blockOutputStreamEntryPool.getMetadata(); - } - /** * Builder class of ECKeyOutputStream. 
*/ @@ -682,9 +624,8 @@ public ECKeyOutputStream build() { */ private void checkNotClosed() throws IOException { if (closing || closed) { - throw new IOException( - ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " - + blockOutputStreamEntryPool.getKeyName()); + throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + + getBlockOutputStreamEntryPool().getKeyName()); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 8b128e9cd945..9ea17cf8b254 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -69,7 +69,6 @@ public class KeyOutputStream extends OutputStream implements Syncable, KeyMetadataAware { - private OzoneClientConfig config; private final ReplicationConfig replication; /** @@ -105,11 +104,8 @@ enum StreamAction { */ private boolean atomicKeyCreation; - public KeyOutputStream(ReplicationConfig replicationConfig, - ContainerClientMetrics clientMetrics, OzoneClientConfig clientConfig, - StreamBufferArgs streamBufferArgs) { + public KeyOutputStream(ReplicationConfig replicationConfig, BlockOutputStreamEntryPool blockOutputStreamEntryPool) { this.replication = replicationConfig; - this.config = clientConfig; closed = false; this.retryPolicyMap = HddsClientUtils.getExceptionList() .stream() @@ -117,18 +113,16 @@ public KeyOutputStream(ReplicationConfig replicationConfig, e -> RetryPolicies.TRY_ONCE_THEN_FAIL)); retryCount = 0; offset = 0; - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool(clientMetrics, clientConfig, streamBufferArgs); + this.blockOutputStreamEntryPool = blockOutputStreamEntryPool; } - @VisibleForTesting - public List getStreamEntries() { - return blockOutputStreamEntryPool.getStreamEntries(); + protected BlockOutputStreamEntryPool getBlockOutputStreamEntryPool() { + return blockOutputStreamEntryPool; } @VisibleForTesting - public XceiverClientFactory getXceiverClientFactory() { - return blockOutputStreamEntryPool.getXceiverClientFactory(); + public List getStreamEntries() { + return blockOutputStreamEntryPool.getStreamEntries(); } @VisibleForTesting @@ -146,39 +140,18 @@ public long getClientID() { return clientID; } - @SuppressWarnings({"parameternumber", "squid:S00107"}) - public KeyOutputStream( - OzoneClientConfig config, - OpenKeySession handler, - XceiverClientFactory xceiverClientManager, - OzoneManagerProtocol omClient, - String requestId, ReplicationConfig replicationConfig, - String uploadID, int partNumber, boolean isMultipart, - boolean unsafeByteBufferConversion, - ContainerClientMetrics clientMetrics, - boolean atomicKeyCreation, StreamBufferArgs streamBufferArgs - ) { - this.config = config; - this.replication = replicationConfig; - blockOutputStreamEntryPool = - new BlockOutputStreamEntryPool( - config, - omClient, - requestId, replicationConfig, - uploadID, partNumber, - isMultipart, handler.getKeyInfo(), - unsafeByteBufferConversion, - xceiverClientManager, - handler.getId(), - clientMetrics, streamBufferArgs); + public KeyOutputStream(Builder b) { + this.replication = b.replicationConfig; + this.blockOutputStreamEntryPool = new BlockOutputStreamEntryPool(b); + final OzoneClientConfig config = b.getClientConfig(); this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException( config.getMaxRetryCount(), 
config.getRetryInterval()); this.retryCount = 0; this.isException = false; this.writeOffset = 0; - this.clientID = handler.getId(); - this.atomicKeyCreation = atomicKeyCreation; - this.streamBufferArgs = streamBufferArgs; + this.clientID = b.getOpenHandler().getId(); + this.atomicKeyCreation = b.getAtomicKeyCreation(); + this.streamBufferArgs = b.getStreamBufferArgs(); } /** @@ -192,10 +165,8 @@ public KeyOutputStream( * * @param version the set of blocks that are pre-allocated. * @param openVersion the version corresponding to the pre-allocation. - * @throws IOException */ - public synchronized void addPreallocateBlocks(OmKeyLocationInfoGroup version, - long openVersion) throws IOException { + public synchronized void addPreallocateBlocks(OmKeyLocationInfoGroup version, long openVersion) { blockOutputStreamEntryPool.addPreallocateBlocks(version, openVersion); } @@ -729,20 +700,7 @@ public boolean getAtomicKeyCreation() { } public KeyOutputStream build() { - return new KeyOutputStream( - clientConfig, - openHandler, - xceiverManager, - omClient, - requestID, - replicationConfig, - multipartUploadID, - multipartNumber, - isMultipartKey, - unsafeByteBufferConversion, - clientMetrics, - atomicKeyCreation, - streamBufferArgs); + return new KeyOutputStream(this); } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java index 7760e88e484a..718e724e5854 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockOutputStreamEntry.java @@ -63,10 +63,10 @@ public class TestECBlockOutputStreamEntry { try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) { HashSet clients = new HashSet<>(); - ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder() - .setXceiverClientManager(manager) - .setPipeline(anECPipeline) - .build(); + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setXceiverClientManager(manager) + .setPipeline(anECPipeline); + final ECBlockOutputStreamEntry entry = b.build(); for (int i = 0; i < nodes.size(); i++) { clients.add( manager.acquireClient( @@ -101,10 +101,10 @@ public class TestECBlockOutputStreamEntry { try (XceiverClientManager manager = new XceiverClientManager(new OzoneConfiguration())) { HashSet clients = new HashSet<>(); - ECBlockOutputStreamEntry entry = new ECBlockOutputStreamEntry.Builder() - .setXceiverClientManager(manager) - .setPipeline(anECPipeline) - .build(); + final ECBlockOutputStreamEntry.Builder b = new ECBlockOutputStreamEntry.Builder(); + b.setXceiverClientManager(manager) + .setPipeline(anECPipeline); + final ECBlockOutputStreamEntry entry = b.build(); for (int i = 0; i < nodes.size(); i++) { clients.add( manager.acquireClient( diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java index da2fb26ec8f5..ca3caa4ee777 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneOutputStreamStub.java @@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.io.KeyMetadataAware; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.scm.StreamBufferArgs; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -81,10 +79,7 @@ public KeyOutputStream getKeyOutputStream() { OzoneConfiguration conf = new OzoneConfiguration(); ReplicationConfig replicationConfig = ReplicationConfig.getDefault(conf); - OzoneClientConfig ozoneClientConfig = conf.getObject(OzoneClientConfig.class); - StreamBufferArgs streamBufferArgs = - StreamBufferArgs.getDefaultStreamBufferArgs(replicationConfig, ozoneClientConfig); - return new KeyOutputStream(replicationConfig, null, ozoneClientConfig, streamBufferArgs) { + return new KeyOutputStream(replicationConfig, null) { @Override public synchronized OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() { From 932a0ac93bfb9143e0928111edf20c5768370d14 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 20 Feb 2024 07:07:23 +0100 Subject: [PATCH 032/108] HDDS-10342. Reduce code duplication in MiniOzoneCluster builders (#6206) --- .../hadoop/ozone/MiniOzoneChaosCluster.java | 3 +- .../apache/hadoop/ozone/MiniOzoneCluster.java | 41 +---- .../hadoop/ozone/MiniOzoneClusterImpl.java | 170 ++++++++---------- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 69 +++---- 4 files changed, 95 insertions(+), 188 deletions(-) diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 26f896663b81..76da4a5a8cab 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -313,8 +313,7 @@ public MiniOzoneChaosCluster build() throws IOException { throw new IOException("Unable to build MiniOzoneCluster. ", ex); } - final List hddsDatanodes = createHddsDatanodes( - scmService.getActiveServices(), null); + final List hddsDatanodes = createHddsDatanodes(); MiniOzoneChaosCluster cluster = new MiniOzoneChaosCluster(conf, omService, scmService, hddsDatanodes, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 667f7448a1bb..c8e32a7917d2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -93,7 +93,7 @@ static Builder newHABuilder(OzoneConfiguration conf) { void waitForClusterToBeReady() throws TimeoutException, InterruptedException; /** - * Waits for atleast one RATIS pipeline of given factor to be reported in open + * Waits for at least one RATIS pipeline of given factor to be reported in open * state. * * @param factor replication factor @@ -121,21 +121,6 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, */ void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException; - /** - * Returns OzoneManager Service ID. 
- * - * @return Service ID String - */ - String getOMServiceId(); - - - /** - * Returns StorageContainerManager Service ID. - * - * @return Service ID String - */ - String getSCMServiceId(); - /** * Returns {@link StorageContainerManager} associated with this * {@link MiniOzoneCluster} instance. @@ -180,20 +165,12 @@ void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, /** * Returns StorageContainerLocationClient to communicate with * {@link StorageContainerManager} associated with the MiniOzoneCluster. - * - * @return StorageContainerLocation Client - * @throws IOException */ StorageContainerLocationProtocolClientSideTranslatorPB getStorageContainerLocationClient() throws IOException; /** * Restarts StorageContainerManager instance. - * - * @param waitForDatanode - * @throws IOException - * @throws TimeoutException - * @throws InterruptedException */ void restartStorageContainerManager(boolean waitForDatanode) throws InterruptedException, TimeoutException, IOException, @@ -201,8 +178,6 @@ void restartStorageContainerManager(boolean waitForDatanode) /** * Restarts OzoneManager instance. - * - * @throws IOException */ void restartOzoneManager() throws IOException; @@ -266,11 +241,6 @@ default void close() { */ void stop(); - /** - * Start Scm. - */ - void startScm() throws IOException; - /** * Start DataNodes. */ @@ -373,13 +343,6 @@ public Builder setStartDataNodes(boolean nodes) { return this; } - /** - * Sets the certificate client. - * - * @param client - * - * @return MiniOzoneCluster.Builder - */ public Builder setCertificateClient(CertificateClient client) { this.certClient = client; return this; @@ -477,8 +440,6 @@ public Builder setDnLayoutVersion(int layoutVersion) { * Constructs and returns MiniOzoneCluster. * * @return {@link MiniOzoneCluster} - * - * @throws IOException */ public abstract MiniOzoneCluster build() throws IOException; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index dd9b83e66db6..0e71063600da 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -81,6 +81,8 @@ import org.apache.ozone.test.GenericTestUtils; import org.apache.commons.io.FileUtils; + +import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; @@ -113,6 +115,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private static final Logger LOG = LoggerFactory.getLogger(MiniOzoneClusterImpl.class); + private static final String[] NO_ARGS = new String[0]; + static { CodecBuffer.enableLeakDetection(); } @@ -132,15 +136,13 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { /** * Creates a new MiniOzoneCluster with Recon. 
- * - * @throws IOException if there is an I/O error */ - MiniOzoneClusterImpl(OzoneConfiguration conf, - SCMConfigurator scmConfigurator, - OzoneManager ozoneManager, - StorageContainerManager scm, - List hddsDatanodes, - ReconServer reconServer) { + private MiniOzoneClusterImpl(OzoneConfiguration conf, + SCMConfigurator scmConfigurator, + OzoneManager ozoneManager, + StorageContainerManager scm, + List hddsDatanodes, + ReconServer reconServer) { this.conf = conf; this.ozoneManager = ozoneManager; this.scm = scm; @@ -154,9 +156,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { * StorageContainerManager. This is used by * {@link MiniOzoneHAClusterImpl} for starting multiple * OzoneManagers and StorageContainerManagers. - * - * @param conf - * @param hddsDatanodes */ MiniOzoneClusterImpl(OzoneConfiguration conf, SCMConfigurator scmConfigurator, List hddsDatanodes, ReconServer reconServer) { @@ -180,18 +179,6 @@ public void setConf(OzoneConfiguration newConf) { this.conf = newConf; } - @Override - public String getOMServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - - @Override - public String getSCMServiceId() { - // Non-HA cluster doesn't have OM Service Id. - return null; - } - public void waitForSCMToBeReady() throws TimeoutException, InterruptedException { if (SCMHAUtils.isSCMHAEnabled(conf)) { @@ -204,9 +191,6 @@ public StorageContainerManager getActiveSCM() { return scm; } - /** - * Waits for the Ozone cluster to be ready for processing requests. - */ @Override public void waitForClusterToBeReady() throws TimeoutException, InterruptedException { @@ -230,10 +214,6 @@ public void waitForClusterToBeReady() }, 1000, waitForClusterToBeReadyTimeout); } - /** - * Waits for atleast one RATIS pipeline of given factor to be reported in open - * state. - */ @Override public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, int timeoutInMs) throws @@ -246,24 +226,11 @@ public void waitForPipelineTobeReady(HddsProtos.ReplicationFactor factor, }, 1000, timeoutInMs); } - /** - * Sets the timeout value after which - * {@link MiniOzoneClusterImpl#waitForClusterToBeReady} times out. - * - * @param timeoutInMs timeout value in milliseconds - */ @Override public void setWaitForClusterToBeReadyTimeout(int timeoutInMs) { waitForClusterToBeReadyTimeout = timeoutInMs; } - /** - * Waits for SCM to be out of Safe Mode. Many tests can be run iff we are out - * of Safe mode. - * - * @throws TimeoutException - * @throws InterruptedException - */ @Override public void waitTobeOutOfSafeMode() throws TimeoutException, InterruptedException { @@ -404,8 +371,7 @@ public void restartHddsDatanode(int i, boolean waitForDatanode) // wait for node to be removed from SCM healthy node list. waitForHddsDatanodeToStop(datanodeService.getDatanodeDetails()); } - String[] args = new String[] {}; - HddsDatanodeService service = new HddsDatanodeService(args); + HddsDatanodeService service = new HddsDatanodeService(NO_ARGS); service.setConfiguration(config); hddsDatanodes.add(i, service); startHddsDatanode(service); @@ -461,15 +427,7 @@ public void stop() { stopRecon(reconServer); } - /** - * Start Scm. 
- */ - @Override - public void startScm() throws IOException { - scm.start(); - } - - public void startHddsDatanode(HddsDatanodeService datanode) { + private void startHddsDatanode(HddsDatanodeService datanode) { try { datanode.setCertificateClient(getCAClient()); } catch (IOException e) { @@ -479,9 +437,6 @@ public void startHddsDatanode(HddsDatanodeService datanode) { datanode.start(); } - /** - * Start DataNodes. - */ @Override public void startHddsDatanodes() { hddsDatanodes.forEach(this::startHddsDatanode); @@ -501,7 +456,7 @@ public void shutdownHddsDatanodes() { @Override public void startRecon() { reconServer = new ReconServer(); - reconServer.execute(new String[]{}); + reconServer.execute(NO_ARGS); } @Override @@ -587,25 +542,10 @@ public MiniOzoneCluster build() throws IOException { ReconServer reconServer = null; List hddsDatanodes = Collections.emptyList(); try { - scm = createSCM(); - scm.start(); - om = createOM(); - if (certClient != null) { - om.setCertClient(certClient); - } - if (secretKeyClient != null) { - om.setSecretKeyClient(secretKeyClient); - } - om.start(); - - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } - - hddsDatanodes = createHddsDatanodes( - Collections.singletonList(scm), reconServer); + scm = createAndStartSingleSCM(); + om = createAndStartSingleOM(); + reconServer = createRecon(); + hddsDatanodes = createHddsDatanodes(); MiniOzoneClusterImpl cluster = new MiniOzoneClusterImpl(conf, scmConfigurator, om, scm, @@ -638,10 +578,17 @@ public MiniOzoneCluster build() throws IOException { } } + protected void setClients(OzoneManager om) throws IOException { + if (certClient != null) { + om.setCertClient(certClient); + } + if (secretKeyClient != null) { + om.setSecretKeyClient(secretKeyClient); + } + } + /** * Initializes the configuration required for starting MiniOzoneCluster. - * - * @throws IOException */ protected void initializeConfiguration() throws IOException { Path metaDir = Paths.get(path, "ozone-meta"); @@ -665,11 +612,18 @@ void removeConfiguration() { FileUtils.deleteQuietly(new File(path)); } + protected StorageContainerManager createAndStartSingleSCM() + throws AuthenticationException, IOException { + StorageContainerManager scm = createSCM(); + scm.start(); + configureScmDatanodeAddress(singletonList(scm)); + return scm; + } + /** * Creates a new StorageContainerManager instance. * * @return {@link StorageContainerManager} - * @throws IOException */ protected StorageContainerManager createSCM() throws IOException, AuthenticationException { @@ -688,6 +642,7 @@ protected StorageContainerManager createSCM() } return scm; } + protected void initializeScmStorage(SCMStorageConfig scmStore) throws IOException { if (scmStore.getState() == StorageState.INITIALIZED) { @@ -723,11 +678,17 @@ void initializeOmStorage(OMStorage omStorage) throws IOException { omStorage.initialize(); } + protected OzoneManager createAndStartSingleOM() throws AuthenticationException, IOException { + OzoneManager om = createOM(); + setClients(om); + om.start(); + return om; + } + /** * Creates a new OzoneManager instance. 
* * @return {@link OzoneManager} - * @throws IOException */ protected OzoneManager createOM() throws IOException, AuthenticationException { @@ -737,14 +698,15 @@ protected OzoneManager createOM() return OzoneManager.createOm(conf); } - protected String getSCMAddresses(List scms) { + private String getSCMAddresses(List scms) { StringBuilder stringBuilder = new StringBuilder(); Iterator iter = scms.iterator(); while (iter.hasNext()) { StorageContainerManager scm = iter.next(); - stringBuilder.append(scm.getDatanodeRpcAddress().getHostString() + - ":" + scm.getDatanodeRpcAddress().getPort()); + stringBuilder.append(scm.getDatanodeRpcAddress().getHostString()) + .append(":") + .append(scm.getDatanodeRpcAddress().getPort()); if (iter.hasNext()) { stringBuilder.append(","); } @@ -753,23 +715,38 @@ protected String getSCMAddresses(List scms) { return stringBuilder.toString(); } + protected void configureScmDatanodeAddress(List scms) { + conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, getSCMAddresses(scms)); + } + + protected ReconServer createRecon() { + ReconServer reconServer = null; + if (includeRecon) { + configureRecon(); + reconServer = new ReconServer(); + reconServer.execute(NO_ARGS); + + OzoneStorageContainerManager reconScm = + reconServer.getReconStorageContainerManager(); + conf.set(OZONE_RECON_ADDRESS_KEY, + reconScm.getDatanodeRpcAddress().getHostString() + ":" + + reconScm.getDatanodeRpcAddress().getPort()); + } + return reconServer; + } + /** * Creates HddsDatanodeService(s) instance. * * @return List of HddsDatanodeService - * @throws IOException */ - protected List createHddsDatanodes( - List scms, ReconServer reconServer) + protected List createHddsDatanodes() throws IOException { - String scmAddress = getSCMAddresses(scms); - String[] args = new String[] {}; - conf.setStrings(ScmConfigKeys.OZONE_SCM_NAMES, scmAddress); List hddsDatanodes = new ArrayList<>(); for (int i = 0; i < numOfDatanodes; i++) { OzoneConfiguration dnConf = new OzoneConfiguration(conf); configureDatanodePorts(dnConf); - String datanodeBaseDir = path + "/datanode-" + Integer.toString(i); + String datanodeBaseDir = path + "/datanode-" + i; Path metaDir = Paths.get(datanodeBaseDir, "meta"); List dataDirs = new ArrayList<>(); List reservedSpaceList = new ArrayList<>(); @@ -795,15 +772,8 @@ protected List createHddsDatanodes( reservedSpaceString); dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); - if (reconServer != null) { - OzoneStorageContainerManager reconScm = - reconServer.getReconStorageContainerManager(); - dnConf.set(OZONE_RECON_ADDRESS_KEY, - reconScm.getDatanodeRpcAddress().getHostString() + ":" + - reconScm.getDatanodeRpcAddress().getPort()); - } - HddsDatanodeService datanode = new HddsDatanodeService(args); + HddsDatanodeService datanode = new HddsDatanodeService(NO_ARGS); datanode.setConfiguration(dnConf); hddsDatanodes.add(datanode); } @@ -859,7 +829,7 @@ protected void configureDatanodePorts(ConfigurationTarget conf) { conf.setFromObject(new ReplicationConfig().setPort(getFreePort())); } - protected void configureRecon() throws IOException { + protected void configureRecon() { ConfigurationProvider.resetConfiguration(); File tempNewFolder = new File(path, "recon"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index ceade72e7d4d..928ae907c65b 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -80,11 +80,6 @@ public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl { private static final int RATIS_RPC_TIMEOUT = 1000; // 1 second public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds - /** - * Creates a new MiniOzoneCluster. - * - * @throws IOException if there is an I/O error - */ public MiniOzoneHAClusterImpl( OzoneConfiguration conf, SCMConfigurator scmConfigurator, @@ -99,19 +94,8 @@ public MiniOzoneHAClusterImpl( this.clusterMetaPath = clusterPath; } - @Override - public String getOMServiceId() { - return omhaService.getServiceId(); - } - - @Override - public String getSCMServiceId() { - return scmhaService.getServiceId(); - } - /** * Returns the first OzoneManager from the list. - * @return */ @Override public OzoneManager getOzoneManager() { @@ -353,12 +337,12 @@ private static void configureOMPorts(ConfigurationTarget conf, public static class Builder extends MiniOzoneClusterImpl.Builder { private static final String OM_NODE_ID_PREFIX = "omNode-"; - private List activeOMs = new ArrayList<>(); - private List inactiveOMs = new ArrayList<>(); + private final List activeOMs = new ArrayList<>(); + private final List inactiveOMs = new ArrayList<>(); private static final String SCM_NODE_ID_PREFIX = "scmNode-"; - private List activeSCMs = new ArrayList<>(); - private List inactiveSCMs = new ArrayList<>(); + private final List activeSCMs = new ArrayList<>(); + private final List inactiveSCMs = new ArrayList<>(); /** * Creates a new Builder. @@ -397,21 +381,16 @@ public MiniOzoneCluster build() throws IOException { initOMRatisConf(); SCMHAService scmService; OMHAService omService; - ReconServer reconServer = null; + ReconServer reconServer; try { scmService = createSCMService(); omService = createOMService(); - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } + reconServer = createRecon(); } catch (AuthenticationException ex) { throw new IOException("Unable to build MiniOzoneCluster. 
", ex); } - final List hddsDatanodes = createHddsDatanodes( - scmService.getActiveServices(), reconServer); + final List hddsDatanodes = createHddsDatanodes(); MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf, scmConfigurator, omService, scmService, hddsDatanodes, path, @@ -458,8 +437,7 @@ protected void initOMRatisConf() { protected OMHAService createOMService() throws IOException, AuthenticationException { if (omServiceId == null) { - OzoneManager om = createOM(); - om.start(); + OzoneManager om = createAndStartSingleOM(); return new OMHAService(singletonList(om), null, null); } @@ -487,9 +465,7 @@ protected OMHAService createOMService() throws IOException, OzoneManager.omInit(config); OzoneManager om = OzoneManager.createOm(config); - if (certClient != null) { - om.setCertClient(certClient); - } + setClients(om); omList.add(om); if (i <= numOfActiveOMs) { @@ -526,8 +502,7 @@ protected OMHAService createOMService() throws IOException, protected SCMHAService createSCMService() throws IOException, AuthenticationException { if (scmServiceId == null) { - StorageContainerManager scm = createSCM(); - scm.start(); + StorageContainerManager scm = createAndStartSingleSCM(); return new SCMHAService(singletonList(scm), null, null); } @@ -592,6 +567,8 @@ protected SCMHAService createSCMService() } } + configureScmDatanodeAddress(activeSCMs); + return new SCMHAService(activeSCMs, inactiveSCMs, scmServiceId); } @@ -720,7 +697,7 @@ public void bootstrapOzoneManager(String omNodeId, while (true) { try { - OzoneConfiguration newConf = addNewOMToConfig(getOMServiceId(), + OzoneConfiguration newConf = addNewOMToConfig(omhaService.getServiceId(), omNodeId); if (updateConfigs) { @@ -777,7 +754,7 @@ private OzoneConfiguration addNewOMToConfig(String omServiceId, /** * Update the configurations of the given list of OMs. */ - public void updateOMConfigs(OzoneConfiguration newConf) { + private void updateOMConfigs(OzoneConfiguration newConf) { for (OzoneManager om : omhaService.getActiveServices()) { om.setConfiguration(newConf); } @@ -870,17 +847,17 @@ public void setupExitManagerForTesting() { * @param */ static class MiniOzoneHAService { - private Map serviceMap; - private List services; - private String serviceId; - private String serviceName; + private final Map serviceMap; + private final List services; + private final String serviceId; + private final String serviceName; // Active services s denote OM/SCM services which are up and running - private List activeServices; - private List inactiveServices; + private final List activeServices; + private final List inactiveServices; // Function to extract the Id from service - private Function serviceIdProvider; + private final Function serviceIdProvider; MiniOzoneHAService(String name, List activeList, List inactiveList, String serviceId, @@ -1006,8 +983,8 @@ public StorageContainerManager getStorageContainerManager() { private static final class ExitManagerForOM extends ExitManager { - private MiniOzoneHAClusterImpl cluster; - private String omNodeId; + private final MiniOzoneHAClusterImpl cluster; + private final String omNodeId; private ExitManagerForOM(MiniOzoneHAClusterImpl cluster, String nodeId) { this.cluster = cluster; From c8e6cabc8f0edb6183dd09a302570ba7480a9b87 Mon Sep 17 00:00:00 2001 From: Siddhant Sangwan Date: Tue, 20 Feb 2024 12:56:43 +0530 Subject: [PATCH 033/108] HDDS-10345. 
No need to sort excluded datanodes during Ratis pipeline creation (#6224) --- .../apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 8336bce5eae7..163f42351032 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Collections; -import java.util.Comparator; import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -244,7 +243,6 @@ private List filterPipelineEngagement() { getPipelineStateManager(), d))) .filter(d -> (d.getPipelines() >= getNodeManager().pipelineLimit(d.getDn()))) - .sorted(Comparator.comparingInt(DnWithPipelines::getPipelines)) .map(d -> d.getDn()) .collect(Collectors.toList()); return excluded; From a1f839036a77cb7d6e95dfa181d29bee93a3ea91 Mon Sep 17 00:00:00 2001 From: Cyrill Date: Tue, 20 Feb 2024 21:57:29 +0300 Subject: [PATCH 034/108] HDDS-10010. Support snapshot rename operation (#6006) --- .../hadoop/ozone/client/ObjectStore.java | 15 + .../ozone/client/protocol/ClientProtocol.java | 13 + .../hadoop/ozone/client/rpc/RpcClient.java | 25 ++ .../java/org/apache/hadoop/ozone/OmUtils.java | 1 + .../om/protocol/OzoneManagerProtocol.java | 15 + ...ManagerProtocolClientSideTranslatorPB.java | 43 ++- .../src/main/proto/OmClientProtocol.proto | 15 + .../apache/hadoop/ozone/audit/OMAction.java | 1 + .../hadoop/ozone/om/SnapshotChainManager.java | 8 + .../ratis/utils/OzoneManagerRatisUtils.java | 3 + .../snapshot/OMSnapshotRenameRequest.java | 230 +++++++++++ .../snapshot/OMSnapshotRenameResponse.java | 67 ++++ .../ozone/om/request/OMRequestTestUtils.java | 35 ++ .../snapshot/TestOMSnapshotRenameRequest.java | 359 ++++++++++++++++++ .../fs/ozone/BasicOzoneClientAdapterImpl.java | 10 + .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 6 + .../BasicRootedOzoneClientAdapterImpl.java | 10 + .../fs/ozone/BasicRootedOzoneFileSystem.java | 6 + .../hadoop/fs/ozone/OzoneClientAdapter.java | 2 + .../ozone/client/ClientProtocolStub.java | 7 + .../shell/snapshot/RenameSnapshotHandler.java | 64 ++++ .../shell/snapshot/SnapshotCommands.java | 3 +- 22 files changed, 931 insertions(+), 7 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 481bdbbd5c2a..e96d0f84a437 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -565,6 +565,21 @@ public String createSnapshot(String volumeName, return 
proxy.createSnapshot(volumeName, bucketName, snapshotName); } + /** + * Rename snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + public void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + proxy.renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + } + /** * Delete snapshot. * @param volumeName vol to be used diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 46e7e20b51b0..492cd31b6722 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1092,6 +1092,19 @@ Map> getKeysEveryReplicas( String createSnapshot(String volumeName, String bucketName, String snapshotName) throws IOException; + /** + * Rename snapshot. + * + * @param volumeName Vol to be used + * @param bucketName Bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException; + /** * Delete snapshot. * @param volumeName vol to be used diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 94d6ae9769dc..3e71262040b9 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -964,6 +964,31 @@ public String createSnapshot(String volumeName, bucketName, snapshotName); } + /** + * Rename Snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + @Override + public void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + Preconditions.checkArgument(StringUtils.isNotBlank(volumeName), + "volume can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(bucketName), + "bucket can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(snapshotOldName), + "old snapshot name can't be null or empty."); + Preconditions.checkArgument(StringUtils.isNotBlank(snapshotNewName), + "new snapshot name can't be null or empty."); + + ozoneManagerClient.renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + } + /** * Delete Snapshot. 
* @param volumeName vol to be used diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index f23a703bd0d7..d58d922b0e07 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -319,6 +319,7 @@ public static boolean isReadOnly( case SetRangerServiceVersion: case CreateSnapshot: case DeleteSnapshot: + case RenameSnapshot: case SnapshotMoveDeletedKeys: case SnapshotPurge: case RecoverLease: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 9fc8e82f03ac..ab3f576d4492 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -681,6 +681,21 @@ default String createSnapshot(String volumeName, "this to be implemented"); } + /** + * Rename snapshot. + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + * + * @throws IOException + */ + default void renameSnapshot(String volumeName, + String bucketName, String snapshotOldName, String snapshotNewName) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented"); + } + /** * Delete snapshot. * @param volumeName vol to be used diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 586410275857..dd201a42620d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -85,6 +85,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CancelPrepareResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateBucketRequest; @@ -106,6 +108,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteTenantResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteVolumeRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCResponse; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeRequest; @@ -121,7 +125,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; @@ -129,14 +132,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListBucketsResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListKeysLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListMultipartUploadsResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListStatusLightResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTenantResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListTrashRequest; @@ -161,6 +164,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneAclInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProtoLight; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareRequestArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrepareStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PrintCompactionLogDagRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RangerBGSyncResponse; @@ -172,12 +180,14 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysMap; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameSnapshotRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RevokeS3SecretRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Secret; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SafeMode; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ServiceListRequest; @@ -185,12 +195,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSafeModeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetTimesRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetVolumePropertyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignAdminRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantAssignUserAccessIdResponse; @@ -202,8 +215,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.TenantRevokeUserAccessIdRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCResponse; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; 
import org.apache.hadoop.ozone.security.acl.OzoneObj; @@ -1224,6 +1235,26 @@ public String createSnapshot(String volumeName, return snapshotInfo.getName(); } + /** + * {@inheritDoc} + */ + @Override + public void renameSnapshot(String volumeName, String bucketName, + String snapshotOldName, String snapshotNewName) throws IOException { + RenameSnapshotRequest.Builder requestBuilder = + RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotOldName(snapshotOldName) + .setSnapshotNewName(snapshotNewName); + + final OMRequest omRequest = createOMRequest(Type.RenameSnapshot) + .setRenameSnapshotRequest(requestBuilder) + .build(); + final OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + } + /** * {@inheritDoc} */ diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 5c737fdad928..b0d26020c8d2 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -146,6 +146,7 @@ enum Type { SetSnapshotProperty = 128; ListStatusLight = 129; GetSnapshotInfo = 130; + RenameSnapshot = 131; } enum SafeMode { @@ -281,6 +282,7 @@ message OMRequest { optional MultipartUploadsExpiredAbortRequest multipartUploadsExpiredAbortRequest = 126; optional SetSnapshotPropertyRequest SetSnapshotPropertyRequest = 127; optional SnapshotInfoRequest SnapshotInfoRequest = 128; + optional RenameSnapshotRequest RenameSnapshotRequest = 129; } message OMResponse { @@ -403,6 +405,7 @@ message OMResponse { optional ListStatusLightResponse listStatusLightResponse = 129; optional SnapshotInfoResponse SnapshotInfoResponse = 130; optional OMLockDetailsProto omLockDetails = 131; + optional RenameSnapshotResponse RenameSnapshotResponse = 132; } enum Status { @@ -1830,6 +1833,14 @@ message CreateSnapshotRequest { optional uint64 creationTime = 5; } +message RenameSnapshotRequest { + optional string volumeName = 1; + optional string bucketName = 2; + optional string snapshotOldName = 3; + optional string snapshotNewName = 4; + optional uint64 renameTime = 5; +} + message ListSnapshotRequest { optional string volumeName = 1; optional string bucketName = 2; @@ -1992,6 +2003,10 @@ message DeleteSnapshotResponse { } +message RenameSnapshotResponse { + optional SnapshotInfo snapshotInfo = 1; +} + message SnapshotInfoResponse { optional SnapshotInfo snapshotInfo = 1; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java index 4e9039252fee..4804b317bc7d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java @@ -98,6 +98,7 @@ public enum OMAction implements AuditAction { CREATE_SNAPSHOT, LIST_SNAPSHOT, DELETE_SNAPSHOT, + RENAME_SNAPSHOT, SNAPSHOT_MOVE_DELETED_KEYS, SNAPSHOT_INFO, SET_TIMES, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java index 18deca1a4ff0..60353590e75c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java @@ -348,6 +348,14 @@ public synchronized void addSnapshot(SnapshotInfo snapshotInfo) snapshotInfo.getTableKey()); } + /** + * Update snapshot chain when snapshot changes (e.g. renamed). + */ + public synchronized void updateSnapshot(SnapshotInfo snapshotInfo) { + snapshotIdToTableKey.computeIfPresent(snapshotInfo.getSnapshotId(), + (snapshotId, dbTableKey) -> snapshotInfo.getTableKey()); + } + /** * Delete snapshot from snapshot chain. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 3ab65346e7eb..b055a1f92f82 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -79,6 +79,7 @@ import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotDeleteRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotMoveDeletedKeysRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotPurgeRequest; +import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotRenameRequest; import org.apache.hadoop.ozone.om.request.snapshot.OMSnapshotSetPropertyRequest; import org.apache.hadoop.ozone.om.request.upgrade.OMCancelPrepareRequest; import org.apache.hadoop.ozone.om.request.upgrade.OMFinalizeUpgradeRequest; @@ -224,6 +225,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, return new OMSnapshotCreateRequest(omRequest); case DeleteSnapshot: return new OMSnapshotDeleteRequest(omRequest); + case RenameSnapshot: + return new OMSnapshotRenameRequest(omRequest); case SnapshotMoveDeletedKeys: return new OMSnapshotMoveDeletedKeysRequest(omRequest); case SnapshotPurge: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java new file mode 100644 index 000000000000..9f1875f65d89 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -0,0 +1,230 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.snapshot; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; + +import java.io.IOException; +import java.nio.file.InvalidPathException; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotRenameResponse; +import org.apache.hadoop.ozone.om.snapshot.RequireSnapshotFeatureState; +import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameSnapshotRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.UserInfo; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Time; +import org.apache.ratis.server.protocol.TermIndex; + +/** + * Changes snapshot name. + */ +public class OMSnapshotRenameRequest extends OMClientRequest { + + public OMSnapshotRenameRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) + @RequireSnapshotFeatureState(true) + public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { + final OMRequest omRequest = super.preExecute(ozoneManager); + + final RenameSnapshotRequest renameSnapshotRequest = + omRequest.getRenameSnapshotRequest(); + + final String snapshotNewName = renameSnapshotRequest.getSnapshotNewName(); + + OmUtils.validateSnapshotName(snapshotNewName); + + String volumeName = renameSnapshotRequest.getVolumeName(); + String bucketName = renameSnapshotRequest.getBucketName(); + + // Permission check + UserGroupInformation ugi = createUGIForApi(); + String bucketOwner = ozoneManager.getBucketOwner(volumeName, bucketName, + IAccessAuthorizer.ACLType.READ, OzoneObj.ResourceType.BUCKET); + if (!ozoneManager.isAdmin(ugi) && + !ozoneManager.isOwner(ugi, bucketOwner)) { + throw new OMException( + "Only bucket owners and Ozone admins can rename snapshots", + OMException.ResultCodes.PERMISSION_DENIED); + } + + // Set rename time here so OM leader and follower would have the + // exact same timestamp. 
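+      // (preExecute runs once on the leader OM, so the value stamped here travels inside the replicated request and every OM applies the same time.)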
+ OMRequest.Builder omRequestBuilder = omRequest.toBuilder() + .setRenameSnapshotRequest( + RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotNewName(snapshotNewName) + .setSnapshotOldName(renameSnapshotRequest.getSnapshotOldName()) + .setRenameTime(Time.now())); + + return omRequestBuilder.build(); + } + + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + TermIndex termIndex) { + boolean acquiredBucketLock = false; + boolean acquiredSnapshotOldLock = false; + boolean acquiredSnapshotNewLock = false; + Exception exception = null; + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) + ozoneManager.getMetadataManager(); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + OMClientResponse omClientResponse = null; + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + + UserInfo userInfo = getOmRequest().getUserInfo(); + + final RenameSnapshotRequest request = + getOmRequest().getRenameSnapshotRequest(); + + final String volumeName = request.getVolumeName(); + final String bucketName = request.getBucketName(); + final String snapshotNewName = request.getSnapshotNewName(); + final String snapshotOldName = request.getSnapshotOldName(); + + SnapshotInfo snapshotOldInfo = null; + + try { + // Acquire bucket lock + mergeOmLockDetails( + omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName)); + acquiredBucketLock = getOmLockDetails().isLockAcquired(); + + mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(SNAPSHOT_LOCK, + volumeName, bucketName, snapshotOldName)); + acquiredSnapshotOldLock = getOmLockDetails().isLockAcquired(); + + mergeOmLockDetails(omMetadataManager.getLock().acquireWriteLock(SNAPSHOT_LOCK, + volumeName, bucketName, snapshotNewName)); + acquiredSnapshotNewLock = getOmLockDetails().isLockAcquired(); + + // Retrieve SnapshotInfo from the table + String snapshotNewTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, snapshotNewName); + + if (omMetadataManager.getSnapshotInfoTable().isExist(snapshotNewTableKey)) { + throw new OMException("Snapshot with name " + snapshotNewName + "already exist", + FILE_ALREADY_EXISTS); + } + + // Retrieve SnapshotInfo from the table + String snapshotOldTableKey = SnapshotInfo.getTableKey(volumeName, bucketName, + snapshotOldName); + snapshotOldInfo = + omMetadataManager.getSnapshotInfoTable().get(snapshotOldTableKey); + + if (snapshotOldInfo == null) { + // Snapshot does not exist + throw new OMException("Snapshot with name " + snapshotOldName + "does not exist", + FILE_NOT_FOUND); + } + + switch (snapshotOldInfo.getSnapshotStatus()) { + case SNAPSHOT_DELETED: + throw new OMException("Snapshot is already deleted. 
" + + "Pending reclamation.", FILE_NOT_FOUND); + case SNAPSHOT_ACTIVE: + break; + default: + // Unknown snapshot non-active state + throw new OMException("Snapshot exists but no longer in active state", + FILE_NOT_FOUND); + } + + snapshotOldInfo.setName(snapshotNewName); + + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshotOldTableKey), + CacheValue.get(termIndex.getIndex())); + + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(snapshotNewTableKey), + CacheValue.get(termIndex.getIndex(), snapshotOldInfo)); + + omMetadataManager.getSnapshotChainManager().updateSnapshot(snapshotOldInfo); + + omResponse.setRenameSnapshotResponse( + OzoneManagerProtocolProtos.RenameSnapshotResponse.newBuilder() + .setSnapshotInfo(snapshotOldInfo.getProtobuf())); + omClientResponse = new OMSnapshotRenameResponse( + omResponse.build(), snapshotOldTableKey, snapshotNewTableKey, snapshotOldInfo); + + } catch (IOException | InvalidPathException ex) { + exception = ex; + omClientResponse = new OMSnapshotRenameResponse( + createErrorOMResponse(omResponse, exception)); + } finally { + if (acquiredSnapshotNewLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_LOCK, volumeName, + bucketName, snapshotNewName)); + } + if (acquiredSnapshotOldLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_LOCK, volumeName, + bucketName, snapshotOldName)); + } + if (acquiredBucketLock) { + mergeOmLockDetails(omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } + } + + if (snapshotOldInfo == null) { + // Dummy SnapshotInfo for logging and audit logging when erred + snapshotOldInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotOldName, null, Time.now()); + } + + // Perform audit logging outside the lock + auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_SNAPSHOT, + snapshotOldInfo.toAuditMap(), exception, userInfo)); + return omClientResponse; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java new file mode 100644 index 000000000000..05bb16a8f514 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotRenameResponse.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.response.snapshot; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE; + +import jakarta.annotation.Nonnull; +import java.io.IOException; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +/** + * Response for OMSnapshotRenameRequest. + */ +@CleanupTableInfo(cleanupTables = {SNAPSHOT_INFO_TABLE}) +public class OMSnapshotRenameResponse extends OMClientResponse { + + private String snapshotOldName; + private String snapshotNewName; + private SnapshotInfo renamedInfo; + + public OMSnapshotRenameResponse(OzoneManagerProtocolProtos.OMResponse omResponse, + String snapshotOldName, String snapshotNewName, + @Nonnull SnapshotInfo renamedInfo) { + super(omResponse); + this.snapshotOldName = snapshotOldName; + this.snapshotNewName = snapshotNewName; + this.renamedInfo = renamedInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. + */ + public OMSnapshotRenameResponse(@Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) + throws IOException { + omMetadataManager.getSnapshotInfoTable() + .putWithBatch(batchOperation, snapshotNewName, renamedInfo); + omMetadataManager.getSnapshotInfoTable() + .deleteWithBatch(batchOperation, snapshotOldName); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index 1bd642fce7d6..e85675e9b079 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -1298,6 +1298,41 @@ public static OMRequest createSnapshotRequest(String volumeName, .build(); } + /** + * Create OMRequest for Rename Snapshot. + * + * @param volumeName vol to be used + * @param bucketName bucket to be used + * @param snapshotOldName Old name of the snapshot + * @param snapshotNewName New name of the snapshot + */ + public static OMRequest renameSnapshotRequest(String volumeName, + String bucketName, + String snapshotOldName, + String snapshotNewName) { + OzoneManagerProtocolProtos.RenameSnapshotRequest renameSnapshotRequest = + OzoneManagerProtocolProtos.RenameSnapshotRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setSnapshotOldName(snapshotOldName) + .setSnapshotNewName(snapshotNewName) + .build(); + + OzoneManagerProtocolProtos.UserInfo userInfo = + OzoneManagerProtocolProtos.UserInfo.newBuilder() + .setUserName("user") + .setHostName("host") + .setRemoteAddress("remote-address") + .build(); + + return OMRequest.newBuilder() + .setRenameSnapshotRequest(renameSnapshotRequest) + .setCmdType(Type.RenameSnapshot) + .setClientId(UUID.randomUUID().toString()) + .setUserInfo(userInfo) + .build(); + } + /** * Create OMRequest for Delete Snapshot. 
* @param volumeName vol to be used diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java new file mode 100644 index 000000000000..14af3e28b8b8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotRenameRequest.java @@ -0,0 +1,359 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.request.snapshot; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditMessage; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.util.UUID; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getFromProtobuf; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.getTableKey; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createSnapshotRequest; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.renameSnapshotRequest; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; +import static 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RenameSnapshot; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.framework; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests OMSnapshotRenameRequest class, which handles RenameSnapshot request. + */ +public class TestOMSnapshotRenameRequest { + + @TempDir + private File anotherTempDir; + + private OzoneManager ozoneManager; + private OMMetrics omMetrics; + private OmMetadataManagerImpl omMetadataManager; + private BatchOperation batchOperation; + + private String volumeName; + private String bucketName; + private String snapshotName1; + private String snapshotName2; + + @BeforeEach + public void setup() throws Exception { + ozoneManager = mock(OzoneManager.class); + omMetrics = OMMetrics.create(); + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, + anotherTempDir.getAbsolutePath()); + ozoneConfiguration.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + anotherTempDir.getAbsolutePath()); + omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, + ozoneManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); + when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.isRatisEnabled()).thenReturn(true); + when(ozoneManager.isFilesystemSnapshotEnabled()).thenReturn(true); + when(ozoneManager.isAdmin(any())).thenReturn(false); + when(ozoneManager.isOwner(any(), any())).thenReturn(false); + when(ozoneManager.getBucketOwner(any(), any(), + any(), any())).thenReturn("dummyBucketOwner"); + OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); + when(lvm.isAllowed(anyString())).thenReturn(true); + when(ozoneManager.getVersionManager()).thenReturn(lvm); + AuditLogger auditLogger = mock(AuditLogger.class); + when(ozoneManager.getAuditLogger()).thenReturn(auditLogger); + doNothing().when(auditLogger).logWrite(any(AuditMessage.class)); + batchOperation = omMetadataManager.getStore().initBatchOperation(); + when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); + OmSnapshotManager omSnapshotManager = new OmSnapshotManager(ozoneManager); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); + + volumeName = UUID.randomUUID().toString(); + bucketName = UUID.randomUUID().toString(); + snapshotName1 = UUID.randomUUID().toString(); + snapshotName2 = UUID.randomUUID().toString(); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + } + + @AfterEach + public void stop() { + omMetrics.unRegister(); + framework().clearInlineMocks(); + if (batchOperation != null) { + batchOperation.close(); + } + } + + @ValueSource(strings = { + // '-' is allowed. + "9cdf0e8a-6946-41ad-a2d1-9eb724fab126", + // 3 chars name is allowed. + "sn1", + // less than or equal to 63 chars are allowed. 
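+      // (the name below is exactly 63 characters, right at the allowed limit)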
+ "snap75795657617173401188448010125899089001363595171500499231286" + }) + @ParameterizedTest + public void testPreExecute(String toSnapshotName) throws Exception { + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); + doPreExecute(omRequest); + } + + @ValueSource(strings = { + // ? is not allowed in snapshot name. + "a?b", + // only numeric name not allowed. + "1234", + // less than 3 chars are not allowed. + "s1", + // more than or equal to 64 chars are not allowed. + "snap156808943643007724443266605711479126926050896107709081166294", + // Underscore is not allowed. + "snap_1", + // CamelCase is not allowed. + "NewSnapshot" + }) + @ParameterizedTest + public void testPreExecuteFailure(String toSnapshotName) { + when(ozoneManager.isOwner(any(), any())).thenReturn(true); + String currentSnapshotName = "current"; + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, currentSnapshotName, toSnapshotName); + OMException omException = + assertThrows(OMException.class, () -> doPreExecute(omRequest)); + assertEquals("Invalid snapshot name: " + toSnapshotName, + omException.getMessage()); + } + + @Test + public void testPreExecuteBadOwner() { + // Owner is not set for the request. + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); + + OMException omException = assertThrows(OMException.class, + () -> doPreExecute(omRequest)); + assertEquals("Only bucket owners and Ozone admins can rename snapshots", + omException.getMessage()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + OzoneManagerProtocolProtos.OMRequest omRequest = renameSnapshotRequest(volumeName, + bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + String key = getTableKey(volumeName, bucketName, snapshotName1); + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + + // Add a 1000-byte key to the bucket + OmKeyInfo key1 = addKey("key-testValidateAndUpdateCache", 12345L); + addKeyToTable(key1); + + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get( + bucketKey); + long bucketDataSize = key1.getDataSize(); + long bucketUsedBytes = omBucketInfo.getUsedBytes(); + assertEquals(key1.getReplicatedSize(), bucketUsedBytes); + + // Value in cache should be null as of now. + assertNull(omMetadataManager.getSnapshotInfoTable().get(key)); + + // Add key to cache. + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName1, UUID.randomUUID(), Time.now()); + snapshotInfo.setReferencedSize(1000L); + snapshotInfo.setReferencedReplicatedSize(3 * 1000L); + assertEquals(SNAPSHOT_ACTIVE, snapshotInfo.getSnapshotStatus()); + omMetadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(key), + CacheValue.get(1L, snapshotInfo)); + + // Run validateAndUpdateCache. 
+ OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 2L); + + assertNotNull(omClientResponse.getOMResponse()); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(RenameSnapshot, omResponse.getCmdType()); + assertEquals(OK, omResponse.getStatus()); + + // verify table data with response data. + OzoneManagerProtocolProtos.SnapshotInfo snapshotInfoProto = + omClientResponse + .getOMResponse() + .getRenameSnapshotResponse() + .getSnapshotInfo(); + + assertEquals(bucketDataSize, snapshotInfoProto.getReferencedSize()); + assertEquals(bucketUsedBytes, + snapshotInfoProto.getReferencedReplicatedSize()); + + SnapshotInfo snapshotInfoOldProto = getFromProtobuf(snapshotInfoProto); + + String key2 = getTableKey(volumeName, bucketName, snapshotName2); + + // Get value from cache + SnapshotInfo snapshotInfoNewInCache = + omMetadataManager.getSnapshotInfoTable().get(key2); + assertNotNull(snapshotInfoNewInCache); + assertEquals(snapshotInfoOldProto, snapshotInfoNewInCache); + assertEquals(snapshotInfo.getSnapshotId(), snapshotInfoNewInCache.getSnapshotId()); + + SnapshotInfo snapshotInfoOldInCache = + omMetadataManager.getSnapshotInfoTable().get(key); + assertNull(snapshotInfoOldInCache); + } + + @Test + public void testEntryExists() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // First make sure we have two snapshots. + OzoneManagerProtocolProtos.OMRequest createOmRequest = + createSnapshotRequest(volumeName, bucketName, snapshotName1); + OMSnapshotCreateRequest omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 1); + + createOmRequest = + createSnapshotRequest(volumeName, bucketName, snapshotName2); + omSnapshotCreateRequest = + TestOMSnapshotCreateRequest.doPreExecute(createOmRequest, ozoneManager); + omSnapshotCreateRequest.validateAndUpdateCache(ozoneManager, 2); + + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // Now try renaming and get an error. 
+ OzoneManagerProtocolProtos.OMRequest omRequest = + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNotNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS, + omResponse.getStatus()); + } + + @Test + public void testEntryNotFound() throws Exception { + when(ozoneManager.isAdmin(any())).thenReturn(true); + + String keyNameOld = getTableKey(volumeName, bucketName, snapshotName1); + String keyNameNew = getTableKey(volumeName, bucketName, snapshotName2); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + // Now try renaming and get an error. + OzoneManagerProtocolProtos.OMRequest omRequest = + renameSnapshotRequest(volumeName, bucketName, snapshotName1, snapshotName2); + OMSnapshotRenameRequest omSnapshotRenameRequest = doPreExecute(omRequest); + + OMClientResponse omClientResponse = + omSnapshotRenameRequest.validateAndUpdateCache(ozoneManager, 3); + + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameOld)); + assertNull(omMetadataManager.getSnapshotInfoTable().get(keyNameNew)); + + OzoneManagerProtocolProtos.OMResponse omResponse = omClientResponse.getOMResponse(); + assertNotNull(omResponse.getRenameSnapshotResponse()); + assertEquals(OzoneManagerProtocolProtos.Status.FILE_NOT_FOUND, + omResponse.getStatus()); + } + + private OMSnapshotRenameRequest doPreExecute( + OzoneManagerProtocolProtos.OMRequest originalRequest) throws Exception { + return doPreExecute(originalRequest, ozoneManager); + } + + public static OMSnapshotRenameRequest doPreExecute( + OzoneManagerProtocolProtos.OMRequest originalRequest, OzoneManager ozoneManager) throws Exception { + OMSnapshotRenameRequest omSnapshotRenameRequest = + new OMSnapshotRenameRequest(originalRequest); + + OzoneManagerProtocolProtos.OMRequest modifiedRequest = + omSnapshotRenameRequest.preExecute(ozoneManager); + return new OMSnapshotRenameRequest(modifiedRequest); + } + + private OmKeyInfo addKey(String keyName, long objectId) { + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(THREE)).setObjectID(objectId) + .build(); + } + + protected String addKeyToTable(OmKeyInfo keyInfo) throws Exception { + OMRequestTestUtils.addKeyToTable(false, true, keyInfo, 0, 0L, + omMetadataManager); + return omMetadataManager.getOzoneKey(keyInfo.getVolumeName(), + keyInfo.getBucketName(), keyInfo.getKeyName()); + } + +} diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index e6892d9784db..1614f81087b1 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -604,6 +604,16 @@ public String createSnapshot(String pathStr, String snapshotName) 
snapshotName); } + @Override + public void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) + throws IOException { + OFSPath ofsPath = new OFSPath(pathStr, config); + objectStore.renameSnapshot(ofsPath.getVolumeName(), + ofsPath.getBucketName(), + snapshotOldName, + snapshotNewName); + } + @Override public void deleteSnapshot(String pathStr, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index dbe3b517e554..cd09cf1d5a8f 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -954,6 +954,12 @@ public Path createSnapshot(Path path, String snapshotName) OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } + @Override + public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) + throws IOException { + getAdapter().renameSnapshot(pathToKey(path), snapshotOldName, snapshotNewName); + } + @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 880427861205..7a80878549bd 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -1254,6 +1254,16 @@ public String createSnapshot(String pathStr, String snapshotName) snapshotName); } + @Override + public void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) + throws IOException { + OFSPath ofsPath = new OFSPath(pathStr, config); + proxy.renameSnapshot(ofsPath.getVolumeName(), + ofsPath.getBucketName(), + snapshotOldName, + snapshotNewName); + } + @Override public void deleteSnapshot(String pathStr, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index b13d726371c4..1fcb1554b6c3 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -532,6 +532,12 @@ public Path createSnapshot(Path path, String snapshotName) OM_SNAPSHOT_INDICATOR + OZONE_URI_DELIMITER + snapshot); } + @Override + public void renameSnapshot(Path path, String snapshotOldName, String snapshotNewName) + throws IOException { + getAdapter().renameSnapshot(pathToKey(path), snapshotOldName, snapshotNewName); + } + @Override public void deleteSnapshot(Path path, String snapshotName) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index c48f1a6366fe..1a6462c1bb3a 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java 
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -89,6 +89,8 @@ FileStatusAdapter getFileStatus(String key, URI uri, String createSnapshot(String pathStr, String snapshotName) throws IOException; + void renameSnapshot(String pathStr, String snapshotOldName, String snapshotNewName) throws IOException; + void deleteSnapshot(String pathStr, String snapshotName) throws IOException; SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 7515d991eba0..d9b834c3186d 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -650,6 +650,13 @@ public String createSnapshot(String volumeName, return ""; } + @Override + public void renameSnapshot(String volumeName, String bucketName, + String snapshotOldName, String snapshotNewName) + throws IOException { + + } + @Override public List listSnapshot( String volumeName, String bucketName, String snapshotPrefix, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java new file mode 100644 index 000000000000..63b61b1ec662 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.shell.snapshot; + +import java.io.IOException; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import picocli.CommandLine; + +/** + * ozone sh snapshot rename. 
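+ * Example usage (volume, bucket and snapshot names below are illustrative placeholders, not taken from the patch):
+ *   ozone sh snapshot rename /vol1/bucket1 snap1 snap1-renamed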
+ */ +@CommandLine.Command(name = "rename", + description = "Rename a snapshot") +public class RenameSnapshotHandler extends Handler { + + @CommandLine.Mixin + private BucketUri snapshotPath; + + @CommandLine.Parameters(description = "Current snapshot name", + index = "1", arity = "1") + private String snapshotOldName; + + @CommandLine.Parameters(description = "New snapshot name", + index = "2", arity = "1") + private String snapshotNewName; + + @Override + protected OzoneAddress getAddress() { + return snapshotPath.getValue(); + } + + @Override + protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException { + String volumeName = snapshotPath.getValue().getVolumeName(); + String bucketName = snapshotPath.getValue().getBucketName(); + OmUtils.validateSnapshotName(snapshotNewName); + client.getObjectStore() + .renameSnapshot(volumeName, bucketName, snapshotOldName, snapshotNewName); + if (isVerbose()) { + out().format("Renamed snapshot from '%s' to '%s' under '%s/%s'.%n", + snapshotOldName, snapshotNewName, volumeName, bucketName); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index cf513b9e913f..25a3c1c66fe9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -43,7 +43,8 @@ ListSnapshotHandler.class, SnapshotDiffHandler.class, ListSnapshotDiffHandler.class, - InfoSnapshotHandler.class + InfoSnapshotHandler.class, + RenameSnapshotHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) From db1561ac107536601e2d58ae4fd980d6447b3b0b Mon Sep 17 00:00:00 2001 From: Sumit Agrawal Date: Wed, 21 Feb 2024 11:47:06 +0530 Subject: [PATCH 035/108] HDDS-10385. Memory leak for thread local usages in OMClientRequest. (#6234) --- .../apache/hadoop/ozone/om/request/OMClientRequest.java | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index d0dd2caa54a7..6c8a66ee7ea7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -78,8 +78,7 @@ public abstract class OMClientRequest implements RequestAuditor { private UserGroupInformation userGroupInformation; private InetAddress inetAddress; - private final ThreadLocal<OMLockDetails> omLockDetails = - ThreadLocal.withInitial(OMLockDetails::new); + private final OMLockDetails omLockDetails = new OMLockDetails(); /** * Stores the result of request execution in @@ -94,7 +93,7 @@ public enum Result { public OMClientRequest(OMRequest omRequest) { Preconditions.checkNotNull(omRequest); this.omRequest = omRequest; - this.omLockDetails.get().clear(); + this.omLockDetails.clear(); } /** * Perform pre-execute steps on a OMRequest. 
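The hunk above replaces a per-request ThreadLocal with a plain field. As a minimal sketch of why the old pattern can leak on pooled handler threads (class and member names here are illustrative only, not from the patch):

    class LeakyRequest {
      // One ThreadLocal per request object; the pooled worker thread never calls remove().
      private final ThreadLocal<Object> state = ThreadLocal.withInitial(Object::new);

      void handle() {
        state.get(); // adds an entry to the current thread's ThreadLocalMap
        // no state.remove(): the entry can outlive this request on the long-lived thread
      }
    }

With a plain field, as in the change above, the lock details live and die with the OMClientRequest instance itself.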
@@ -575,10 +574,10 @@ public static String isValidKeyPath(String path) throws OMException { } public OMLockDetails getOmLockDetails() { - return omLockDetails.get(); + return omLockDetails; } public void mergeOmLockDetails(OMLockDetails details) { - omLockDetails.get().merge(details); + omLockDetails.merge(details); } } From 652b9bcc0a1ec2ada72c91908decda26979e28a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 21 Feb 2024 07:47:49 +0100 Subject: [PATCH 036/108] HDDS-10401. Bump commons-compress to 1.26.0 (#6240) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index b6ee69a3300c..2463e1802771 100644 --- a/pom.xml +++ b/pom.xml @@ -116,7 +116,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.6.0 1.15 3.2.2 - 1.25.0 + 1.26.0 2.8.0 1.5.2-5 1.0.13 From 3a01fea4caa4ae1db938a5d3cfdaf90e82584de1 Mon Sep 17 00:00:00 2001 From: rohit-kb <115476286+rohit-kb@users.noreply.github.com> Date: Wed, 21 Feb 2024 20:17:08 +0530 Subject: [PATCH 037/108] HDDS-10406. Bump aws-java-sdk to 1.12.661 (#6249) --- hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 1 - hadoop-ozone/dist/src/main/license/jar-report.txt | 1 - pom.xml | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 64f67fd4dfde..97942b2ae43f 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -442,7 +442,6 @@ Apache License 2.0 org.xerial.snappy:snappy-java org.xerial:sqlite-jdbc org.yaml:snakeyaml - software.amazon.ion:ion-java MIT ===================== diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index 58feec550034..ce2d4136b113 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -87,7 +87,6 @@ share/ozone/lib/httpclient.jar share/ozone/lib/httpcore.jar share/ozone/lib/httpcore-nio.jar share/ozone/lib/httpmime.jar -share/ozone/lib/ion-java.jar share/ozone/lib/istack-commons-runtime.jar share/ozone/lib/j2objc-annotations.jar share/ozone/lib/jackson-annotations.jar diff --git a/pom.xml b/pom.xml index 2463e1802771..5dce3f2f3573 100644 --- a/pom.xml +++ b/pom.xml @@ -283,7 +283,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.1.0 9.3 1200 - 1.12.632 + 1.12.661 1.15.0 From 9c187df7df65ea581f66dd97e44aabd1817cd35d Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Thu, 22 Feb 2024 13:40:11 +0800 Subject: [PATCH 038/108] HDDS-10398. 
Remove deleted_blocks table in container schema V2 and V3 definition (#6237) --- .../metadata/AbstractDatanodeDBDefinition.java | 4 ---- .../metadata/AbstractDatanodeStore.java | 10 ++-------- .../DatanodeSchemaOneDBDefinition.java | 1 - .../DatanodeSchemaThreeDBDefinition.java | 18 ------------------ .../DatanodeSchemaTwoDBDefinition.java | 17 ----------------- .../metadata/DatanodeStoreSchemaOneImpl.java | 8 +++++++- .../metadata/DatanodeStoreSchemaThreeImpl.java | 5 ----- 7 files changed, 9 insertions(+), 54 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java index 2c1c3c214d56..c174108ba239 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBDefinition; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import java.io.File; @@ -70,7 +69,4 @@ public ConfigurationSource getConfig() { public abstract DBColumnFamilyDefinition getMetadataColumnFamily(); - - public abstract DBColumnFamilyDefinition - getDeletedBlocksColumnFamily(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index b451071d7030..faa3b195f14b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -60,8 +60,6 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private Table blockDataTableWithIterator; - private Table deletedBlocksTable; - static final Logger LOG = LoggerFactory.getLogger(AbstractDatanodeStore.class); private volatile DBStore store; @@ -154,10 +152,6 @@ public void start(ConfigurationSource config) blockDataTable = new DatanodeTable<>(blockDataTableWithIterator); checkTableStatus(blockDataTable, blockDataTable.getName()); - - deletedBlocksTable = new DatanodeTable<>( - dbDef.getDeletedBlocksColumnFamily().getTable(this.store)); - checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName()); } } @@ -191,7 +185,7 @@ public Table getBlockDataTable() { @Override public Table getDeletedBlocksTable() { - return deletedBlocksTable; + throw new UnsupportedOperationException("DeletedBlocksTable is only supported in Container Schema One"); } @Override @@ -250,7 +244,7 @@ protected Table getBlockDataTableWithIterator() { return this.blockDataTableWithIterator; } - private static void checkTableStatus(Table table, String name) + protected static void checkTableStatus(Table table, String name) throws IOException { String logMessage = "Unable to get a reference to %s table. 
Cannot " + "continue."; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java index a002eef3f72a..f0bab5e5d026 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java @@ -96,7 +96,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override public DBColumnFamilyDefinition getDeletedBlocksColumnFamily() { return DELETED_BLOCKS; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java index 1d1c7faa69ba..3be229a45ac7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaThreeDBDefinition.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; @@ -74,15 +73,6 @@ public class DatanodeSchemaThreeDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETED_BLOCKS = - new DBColumnFamilyDefinition<>( - "deleted_blocks", - String.class, - FixedLengthStringCodec.get(), - ChunkInfoList.class, - ChunkInfoList.getCodec()); - public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( @@ -98,7 +88,6 @@ public class DatanodeSchemaThreeDBDefinition COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, - DELETED_BLOCKS, DELETE_TRANSACTION); public DatanodeSchemaThreeDBDefinition(String dbPath, @@ -120,7 +109,6 @@ public DatanodeSchemaThreeDBDefinition(String dbPath, BLOCK_DATA.setCfOptions(cfOptions); METADATA.setCfOptions(cfOptions); - DELETED_BLOCKS.setCfOptions(cfOptions); DELETE_TRANSACTION.setCfOptions(cfOptions); } @@ -140,12 +128,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override - public DBColumnFamilyDefinition - getDeletedBlocksColumnFamily() { - return DELETED_BLOCKS; - } - public DBColumnFamilyDefinition getDeleteTransactionsColumnFamily() { return DELETE_TRANSACTION; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java index cc78aa6a92c8..cc6c3dc83725 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java @@ -25,7 +25,6 @@ 
import org.apache.hadoop.hdds.utils.db.Proto2Codec; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; @@ -58,15 +57,6 @@ public class DatanodeSchemaTwoDBDefinition Long.class, LongCodec.get()); - public static final DBColumnFamilyDefinition - DELETED_BLOCKS = - new DBColumnFamilyDefinition<>( - "deleted_blocks", - String.class, - StringCodec.get(), - ChunkInfoList.class, - ChunkInfoList.getCodec()); - public static final DBColumnFamilyDefinition DELETE_TRANSACTION = new DBColumnFamilyDefinition<>( @@ -85,7 +75,6 @@ public DatanodeSchemaTwoDBDefinition(String dbPath, COLUMN_FAMILIES = DBColumnFamilyDefinition.newUnmodifiableMap( BLOCK_DATA, METADATA, - DELETED_BLOCKS, DELETE_TRANSACTION); @Override @@ -104,12 +93,6 @@ public DBColumnFamilyDefinition getMetadataColumnFamily() { return METADATA; } - @Override - public DBColumnFamilyDefinition - getDeletedBlocksColumnFamily() { - return DELETED_BLOCKS; - } - public DBColumnFamilyDefinition getDeleteTransactionsColumnFamily() { return DELETE_TRANSACTION; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java index 4b514c04e44e..f5eb1a3d8ec5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java @@ -28,6 +28,9 @@ * places all data in the default column family. */ public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore { + + private Table deletedBlocksTable; + /** * Constructs the metadata store and starts the DB Services. * @@ -38,12 +41,15 @@ public DatanodeStoreSchemaOneImpl(ConfigurationSource config, String dbPath, boolean openReadOnly) throws IOException { super(config, new DatanodeSchemaOneDBDefinition(dbPath, config), openReadOnly); + deletedBlocksTable = new DatanodeTable<>( + ((DatanodeSchemaOneDBDefinition) getDbDef()).getDeletedBlocksColumnFamily().getTable(getStore())); + checkTableStatus(deletedBlocksTable, deletedBlocksTable.getName()); } @Override public Table getDeletedBlocksTable() { // Return a wrapper around the deleted blocks table to handle prefixes // when all data is stored in a single table. 
- return new SchemaOneDeletedBlocksTable(super.getDeletedBlocksTable()); + return new SchemaOneDeletedBlocksTable(deletedBlocksTable); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java index ee8580defa0c..c16d478b166d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaThreeImpl.java @@ -99,7 +99,6 @@ public void removeKVContainerData(long containerID) throws IOException { try (BatchOperation batch = getBatchHandler().initBatchOperation()) { getMetadataTable().deleteBatchWithPrefix(batch, prefix); getBlockDataTable().deleteBatchWithPrefix(batch, prefix); - getDeletedBlocksTable().deleteBatchWithPrefix(batch, prefix); getDeleteTransactionTable().deleteBatchWithPrefix(batch, prefix); getBatchHandler().commitBatchOperation(batch); } @@ -112,8 +111,6 @@ public void dumpKVContainerData(long containerID, File dumpDir) getTableDumpFile(getMetadataTable(), dumpDir), prefix); getBlockDataTable().dumpToFileWithPrefix( getTableDumpFile(getBlockDataTable(), dumpDir), prefix); - getDeletedBlocksTable().dumpToFileWithPrefix( - getTableDumpFile(getDeletedBlocksTable(), dumpDir), prefix); getDeleteTransactionTable().dumpToFileWithPrefix( getTableDumpFile(getDeleteTransactionTable(), dumpDir), prefix); @@ -125,8 +122,6 @@ public void loadKVContainerData(File dumpDir) getTableDumpFile(getMetadataTable(), dumpDir)); getBlockDataTable().loadFromFile( getTableDumpFile(getBlockDataTable(), dumpDir)); - getDeletedBlocksTable().loadFromFile( - getTableDumpFile(getDeletedBlocksTable(), dumpDir)); getDeleteTransactionTable().loadFromFile( getTableDumpFile(getDeleteTransactionTable(), dumpDir)); } From c9d3b23bb534e7cbc08c6316380b2f22f4763527 Mon Sep 17 00:00:00 2001 From: Ashish Kumar <117710273+ashishkumar50@users.noreply.github.com> Date: Thu, 22 Feb 2024 13:23:06 +0530 Subject: [PATCH 039/108] HDDS-10397. Restrict legacy bucket directory deletion through sh command. 
(#6244) --- .../hadoop/ozone/shell/TestOzoneShellHA.java | 39 +++++++++++++++++++ .../ozone/shell/keys/DeleteKeyHandler.java | 8 ++++ 2 files changed, 47 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 51956accb1c4..34e63a4a0110 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -62,6 +62,7 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ToolRunner; @@ -182,6 +183,7 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { final int numDNs = 5; conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); cluster = MiniOzoneCluster.newOMHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) @@ -1984,6 +1986,43 @@ public void testLinkedAndNonLinkedBucketMetaData() out.reset(); } + @Test + public void testKeyDeleteLegacyWithEnableFileSystemPath() throws IOException { + String volumeName = "vol5"; + String bucketName = "legacybucket"; + String[] args = new String[] {"volume", "create", "o3://" + omServiceId + OZONE_URI_DELIMITER + volumeName}; + execute(ozoneShell, args); + + args = new String[] {"bucket", "create", "o3://" + omServiceId + OZONE_URI_DELIMITER + + volumeName + OZONE_URI_DELIMITER + bucketName, "--layout", BucketLayout.LEGACY.toString()}; + execute(ozoneShell, args); + + String dirPath = OZONE_URI_DELIMITER + volumeName + OZONE_URI_DELIMITER + + bucketName + OZONE_URI_DELIMITER + "dir/"; + String keyPath = dirPath + "key1"; + + // Create key, it will generate two keys, one with dirPath other with keyPath + args = new String[] {"key", "put", "o3://" + omServiceId + keyPath, testFile.getPath()}; + execute(ozoneShell, args); + + // Enable fileSystem path for client config + String fileSystemEnable = generateSetConfString(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + // Delete dirPath key, it should fail + args = new String[] {fileSystemEnable, "key", "delete", dirPath}; + execute(ozoneShell, args); + + // Check number of keys + OzoneVolume volume = client.getObjectStore().getVolume(volumeName); + OzoneBucket bucket = volume.getBucket(bucketName); + List files = bucket.listStatus("", true, "", 5); + // Two keys should still exist, dirPath and keyPath + assertEquals(2, files.size()); + + // cleanup + args = new String[] {"volume", "delete", volumeName, "-r", "--yes"}; + execute(ozoneShell, args); + } + private static String getKeyProviderURI(MiniKMS kms) { return KMSClientProvider.SCHEME_NAME + "://" + kms.getKMSUrl().toExternalForm().replace("://", "@"); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java index d1a6a4e156fd..4c795f1e82b4 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java @@ -41,6 +41,8 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT; /** * Executes Delete Key. @@ -68,6 +70,12 @@ protected void execute(OzoneClient client, OzoneAddress address) return; } + if (bucket.getBucketLayout().isLegacy() && keyName.endsWith(OZONE_URI_DELIMITER) + && (getConf().getBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT))) { + out().printf("Use FS(ofs/o3fs) interface to delete legacy bucket directory %n"); + return; + } + if (bucket.getBucketLayout().isFileSystemOptimized()) { // Handle FSO delete key which supports trash also deleteFSOKey(bucket, keyName); From 5f6306dd1dde3c9c982ed933aa1fa8f4fa7a9301 Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Thu, 22 Feb 2024 18:47:58 +0800 Subject: [PATCH 040/108] HDDS-10395. Fix eTag compatibility issues for MPU (#6235) --- .../OzoneMultipartUploadPartListParts.java | 12 ++-- .../hadoop/ozone/om/helpers/OmPartInfo.java | 24 +++++--- .../hadoop/ozone/om/KeyManagerImpl.java | 14 ++++- .../hadoop/ozone/om/TestKeyManagerUnit.java | 55 +++++++++++++++++++ .../ozone/s3/endpoint/ObjectEndpoint.java | 11 +++- 5 files changed, 97 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java index 67f8edf31408..c085720d1918 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneMultipartUploadPartListParts.java @@ -98,13 +98,13 @@ public ReplicationConfig getReplicationConfig() { /** * Class that represents each Part information of a multipart upload part. */ - public static class PartInfo { + public static final class PartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; - private String eTag; + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; public PartInfo(int number, String name, long time, long size, String eTag) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java index e908c5a025f1..35d97cd4ffdc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPartInfo.java @@ -23,12 +23,12 @@ /** * Class that defines information about each part of a multipart upload key. 
*/ -public class OmPartInfo { - private int partNumber; - private String partName; - private long modificationTime; - private long size; - private String eTag; +public final class OmPartInfo { + private final int partNumber; + private final String partName; + private final long modificationTime; + private final long size; + private final String eTag; public OmPartInfo(int number, String name, long time, long size, String eTag) { @@ -60,8 +60,14 @@ public String getETag() { } public PartInfo getProto() { - return PartInfo.newBuilder().setPartNumber(partNumber).setPartName(partName) - .setModificationTime(modificationTime) - .setSize(size).setETag(eTag).build(); + PartInfo.Builder builder = PartInfo.newBuilder() + .setPartNumber(partNumber) + .setPartName(partName) + .setModificationTime(modificationTime) + .setSize(size); + if (eTag != null) { + builder.setETag(eTag); + } + return builder.build(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 2c9419e78d07..3786601dd63a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -32,6 +32,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.Stack; import java.util.TreeMap; @@ -47,6 +48,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; @@ -822,13 +824,19 @@ public OmMultipartUploadListParts listParts(String volumeName, if (nextPartNumberMarker > partNumberMarker) { String partName = getPartName(partKeyInfo, volumeName, bucketName, keyName); + // Before HDDS-9680, MPU part does not have eTag metadata, for + // this case, we return null. The S3G will handle this case by + // using the MPU part name as the eTag field instead. 
+ Optional eTag = partKeyInfo.getPartKeyInfo() + .getMetadataList() + .stream() + .filter(keyValue -> keyValue.getKey().equals(ETAG)) + .findFirst(); OmPartInfo omPartInfo = new OmPartInfo(partKeyInfo.getPartNumber(), partName, partKeyInfo.getPartKeyInfo().getModificationTime(), partKeyInfo.getPartKeyInfo().getDataSize(), - partKeyInfo.getPartKeyInfo().getMetadataList().stream() - .filter(keyValue -> keyValue.getKey().equals(ETAG)) - .findFirst().get().getValue()); + eTag.map(HddsProtos.KeyValue::getValue).orElse(null)); omPartInfoList.add(omPartInfo); //if there are parts, use replication type from one of the parts diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 6454a77d66f3..278d96023c81 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -65,6 +65,7 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -161,6 +162,60 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { omMultipartUploadListParts.getPartInfoList().size()); } + @Test + public void listMultipartUploadPartsWithoutEtagField() throws IOException { + // For backward compatibility reasons + final String volume = volumeName(); + final String bucket = "bucketForEtag"; + final String key = "dir/key1"; + createBucket(metadataManager, volume, bucket); + OmMultipartInfo omMultipartInfo = + initMultipartUpload(writeClient, volume, bucket, key); + + + // Commit some MPU parts without eTag field + for (int i = 1; i <= 5; i++) { + OmKeyArgs partKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .build(); + + OpenKeySession openKey = writeClient.openKey(partKeyArgs); + + OmKeyArgs commitPartKeyArgs = + new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(bucket) + .setKeyName(key) + .setIsMultipartKey(true) + .setMultipartUploadID(omMultipartInfo.getUploadID()) + .setMultipartUploadPartNumber(i) + .setAcls(Collections.emptyList()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setLocationInfoList(Collections.emptyList()) + .build(); + + writeClient.commitMultipartUploadPart(commitPartKeyArgs, openKey.getId()); + } + + + OmMultipartUploadListParts omMultipartUploadListParts = keyManager + .listParts(volume, bucket, key, omMultipartInfo.getUploadID(), + 0, 10); + assertEquals(5, + omMultipartUploadListParts.getPartInfoList().size()); + + } + private String volumeName() { return getTestName(); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 4a36ad9e62a8..24115abe8e6b 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -999,6 +999,12 @@ private Response createMultipartKey(OzoneVolume volume, String bucket, OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = keyOutputStream.getCommitUploadPartInfo(); String eTag = omMultipartCommitUploadPartInfo.getETag(); + // If the OmMultipartCommitUploadPartInfo does not contain eTag, + // fall back to MPU part name for compatibility in case the (old) OM + // does not return the eTag field + if (StringUtils.isEmpty(eTag)) { + eTag = omMultipartCommitUploadPartInfo.getPartName(); + } if (copyHeader != null) { getMetrics().updateCopyObjectSuccessStats(startNanos); @@ -1069,7 +1075,10 @@ private Response listParts(String bucket, String key, String uploadID, ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { ListPartsResponse.Part part = new ListPartsResponse.Part(); part.setPartNumber(partInfo.getPartNumber()); - part.setETag(partInfo.getETag()); + // If the ETag field does not exist, use MPU part name for backward + // compatibility + part.setETag(StringUtils.isNotEmpty(partInfo.getETag()) ? + partInfo.getETag() : partInfo.getPartName()); part.setSize(partInfo.getSize()); part.setLastModified(Instant.ofEpochMilli( partInfo.getModificationTime())); From 45d420ab7c5d95e8c95cf6f650732681437856ef Mon Sep 17 00:00:00 2001 From: Arafat2198 <98023601+ArafatKhan2198@users.noreply.github.com> Date: Thu, 22 Feb 2024 23:19:14 +0530 Subject: [PATCH 041/108] HDDS-10293. IllegalArgumentException: containerSize Negative (#6178) --- .../schema/ContainerSchemaDefinition.java | 3 +- .../types/UnhealthyContainersResponse.java | 13 +++ .../ozone/recon/fsck/ContainerHealthTask.java | 38 ++++++++- .../recon/tasks/ContainerSizeCountTask.java | 79 +++++++++++++------ .../hadoop/ozone/recon/api/TestEndpoints.java | 2 + .../recon/fsck/TestContainerHealthTask.java | 59 ++++++++++++++ .../tasks/TestContainerSizeCountTask.java | 74 +++++++++++++++-- 7 files changed, 236 insertions(+), 32 deletions(-) diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java index 43e2d728b763..4d62ca886cda 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ContainerSchemaDefinition.java @@ -51,7 +51,8 @@ public enum UnHealthyContainerStates { UNDER_REPLICATED, OVER_REPLICATED, MIS_REPLICATED, - ALL_REPLICAS_UNHEALTHY + ALL_REPLICAS_UNHEALTHY, + NEGATIVE_SIZE // Added new state to track containers with negative sizes } private static final String CONTAINER_ID = "container_id"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java index eaf08d9ca83e..ba03ec61f145 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java +++ 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/UnhealthyContainersResponse.java @@ -50,6 +50,12 @@ public class UnhealthyContainersResponse { @JsonProperty("misReplicatedCount") private long misReplicatedCount = 0; + /** + * Total count of containers with negative size. + */ + @JsonProperty("negativeSizeCount") + private long negativeSizeCount = 0; + /** * A collection of unhealthy containers. */ @@ -77,6 +83,9 @@ public void setSummaryCount(String state, long count) { } else if (state.equals( UnHealthyContainerStates.MIS_REPLICATED.toString())) { this.misReplicatedCount = count; + } else if (state.equals( + UnHealthyContainerStates.NEGATIVE_SIZE.toString())) { + this.negativeSizeCount = count; } } @@ -96,6 +105,10 @@ public long getMisReplicatedCount() { return misReplicatedCount; } + public long getNegativeSizeCount() { + return negativeSizeCount; + } + public Collection getContainers() { return containers; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 577fb7d2bcc1..a5d259d3e939 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -217,6 +217,8 @@ private void initializeUnhealthyContainerStateStatsMap( UnHealthyContainerStates.OVER_REPLICATED, new HashMap<>()); unhealthyContainerStateStatsMap.put( UnHealthyContainerStates.MIS_REPLICATED, new HashMap<>()); + unhealthyContainerStateStatsMap.put( + UnHealthyContainerStates.NEGATIVE_SIZE, new HashMap<>()); } private ContainerHealthStatus setCurrentContainer(long recordId) @@ -313,13 +315,21 @@ private long processExistingDBRecords(long currentTime, private void processContainer(ContainerInfo container, long currentTime, Map> - unhealthyContainerStateStatsMap) { + unhealthyContainerStateStatsMap) { try { Set containerReplicas = containerManager.getContainerReplicas(container.containerID()); ContainerHealthStatus h = new ContainerHealthStatus(container, containerReplicas, placementPolicy, reconContainerMetadataManager, conf); + + // Handle negative sized containers separately + if (h.getContainer().getUsedBytes() < 0) { + handleNegativeSizedContainers(h, currentTime, + unhealthyContainerStateStatsMap); + return; + } + if (h.isHealthilyReplicated() || h.isDeleted()) { return; } @@ -365,6 +375,32 @@ private boolean containerDeletedInSCM(ContainerInfo containerInfo) { return false; } + /** + * This method is used to handle containers with negative sizes. It logs an + * error message and inserts a record into the UNHEALTHY_CONTAINERS table. + * @param containerHealthStatus + * @param currentTime + * @param unhealthyContainerStateStatsMap + */ + private void handleNegativeSizedContainers( + ContainerHealthStatus containerHealthStatus, long currentTime, + Map> + unhealthyContainerStateStatsMap) { + ContainerInfo container = containerHealthStatus.getContainer(); + LOG.error( + "Container {} has negative size. 
Please visit Recon's unhealthy " + + "container endpoint for more details.", + container.getContainerID()); + UnhealthyContainers record = + ContainerHealthRecords.recordForState(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, currentTime); + List records = Collections.singletonList(record); + populateContainerStats(containerHealthStatus, + UnHealthyContainerStates.NEGATIVE_SIZE, + unhealthyContainerStateStatsMap); + containerHealthSchemaManager.insertUnhealthyContainerRecords(records); + } + /** * Helper methods to generate and update the required database records for * unhealthy containers. diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java index fb387861f0e3..105406f2bdf6 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.scm.ReconScmTask; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.ContainerCountBySizeDao; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; @@ -34,13 +35,14 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.ArrayList; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; @@ -60,6 +62,8 @@ public class ContainerSizeCountTask extends ReconScmTask { private ContainerCountBySizeDao containerCountBySizeDao; private DSLContext dslContext; private HashMap processedContainers = new HashMap<>(); + private Map> + unhealthyContainerStateStatsMap; private ReadWriteLock lock = new ReentrantReadWriteLock(true); public ContainerSizeCountTask( @@ -121,7 +125,17 @@ protected synchronized void run() { private void process(ContainerInfo container, Map map) { final ContainerID id = container.containerID(); - final long currentSize = container.getUsedBytes(); + final long usedBytes = container.getUsedBytes(); + final long currentSize; + + if (usedBytes < 0) { + LOG.warn("Negative usedBytes ({}) for container {}, treating it as 0", + usedBytes, id); + currentSize = 0; + } else { + currentSize = usedBytes; + } + final Long previousSize = processedContainers.put(id, currentSize); if (previousSize != null) { decrementContainerSizeCount(previousSize, map); @@ -132,24 +146,27 @@ private void process(ContainerInfo container, /** * The process() function is responsible for updating the counts of * containers being tracked in a containerSizeCountMap based on the - * ContainerInfo objects in the list containers.It then iterates through + * ContainerInfo objects in the list containers. 
It then iterates through * the list of containers and does the following for each container: * - * 1) If the container is not present in processedContainers, - * it is a new container, so it is added to the processedContainers map - * and the count for its size in the containerSizeCountMap is incremented - * by 1 using the handlePutKeyEvent() function. - * 2) If the container is present in processedContainers but its size has - * been updated to the new size then the count for the old size in the - * containerSizeCountMap is decremented by 1 using the - * handleDeleteKeyEvent() function. The count for the new size is then - * incremented by 1 using the handlePutKeyEvent() function. - * 3) If the container is not present in containers list, it means the - * container has been deleted. - * The remaining containers inside the deletedContainers map are the ones - * that are not in the cluster and need to be deleted. Finally, the counts in - * the containerSizeCountMap are written to the database using the - * writeCountsToDB() function. + * 1) If the container's state is not "deleted," it will be processed: + * - If the container is not present in processedContainers, it is a new + * container. Therefore, it is added to the processedContainers map, and + * the count for its size in the containerSizeCountMap is incremented by + * 1 using the handlePutKeyEvent() function. + * - If the container is present in processedContainers but its size has + * been updated to a new size, the count for the old size in the + * containerSizeCountMap is decremented by 1 using the + * handleDeleteKeyEvent() function. Subsequently, the count for the new + * size is incremented by 1 using the handlePutKeyEvent() function. + * + * 2) If the container's state is "deleted," it is skipped, as deleted + * containers are not processed. + * + * After processing, the remaining containers inside the deletedContainers map + * are those that are not in the cluster and need to be deleted from the total + * size counts. Finally, the counts in the containerSizeCountMap are written + * to the database using the writeCountsToDB() function. */ public void process(List containers) { lock.writeLock().lock(); @@ -161,7 +178,9 @@ public void process(List containers) { // Loop to handle container create and size-update operations for (ContainerInfo container : containers) { - // The containers present in the cache hence it is not yet deleted + if (container.getState().equals(DELETED)) { + continue; // Skip deleted containers + } deletedContainers.remove(container.containerID()); // For New Container being created try { @@ -246,10 +265,10 @@ public String getTaskName() { /** * - * The handleContainerDeleteOperations() function loops through the entries - * in the deletedContainers map and calls the handleDeleteKeyEvent() function - * for each one. This will decrement the size counts of those containers by - * one which are no longer present in the cluster + * Handles the deletion of containers by updating the tracking of processed containers + * and adjusting the count of containers based on their sizes. When a container is deleted, + * it is removed from the tracking of processed containers, and the count of containers + * corresponding to its size is decremented in the container size count map. 
* * Used by process() * @@ -261,6 +280,9 @@ private void handleContainerDeleteOperations( Map containerSizeCountMap) { for (Map.Entry containerId : deletedContainers.entrySet()) { + // processedContainers will only keep a track of all containers that have + // been processed except DELETED containers. + processedContainers.remove(containerId.getKey()); long containerSize = deletedContainers.get(containerId.getKey()); decrementContainerSizeCount(containerSize, containerSizeCountMap); } @@ -316,19 +338,26 @@ private static void updateContainerSizeCount(long containerSize, int delta, } /** - * * The purpose of this function is to categorize containers into different * size ranges, or "bins," based on their size. * The ContainerSizeCountKey object is used to store the upper bound value * for each size range, and is later used to lookup the count of containers * in that size range within a Map. * - * Used by decrementContainerSizeCount() and incrementContainerSizeCount() + * If the container size is 0, the method sets the size of + * ContainerSizeCountKey as zero without calculating the upper bound. Used by + * decrementContainerSizeCount() and incrementContainerSizeCount() * * @param containerSize to calculate the upperSizeBound */ private static ContainerSizeCountKey getContainerSizeCountKey( long containerSize) { + // If containerSize is 0, return a ContainerSizeCountKey with size 0 + if (containerSize == 0) { + return new ContainerSizeCountKey(0L); + } + + // Otherwise, calculate the upperSizeBound return new ContainerSizeCountKey( ReconUtils.getContainerSizeUpperBound(containerSize)); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 42aabef0cf15..f7deaddd4fb1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -867,10 +867,12 @@ public void testGetContainerCounts() throws Exception { ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(LifeCycleState.OPEN); ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(LifeCycleState.OPEN); // Create a list of container info objects List containers = new ArrayList<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 371fb6f9d675..001d44d9c203 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -343,6 +343,65 @@ public void testDeletedContainer() throws Exception { .isGreaterThan(currentTime); } + @Test + public void testNegativeSizeContainers() throws Exception { + // Setup mock objects and test environment + UnhealthyContainersDao unhealthyContainersDao = + getDao(UnhealthyContainersDao.class); + ContainerHealthSchemaManager containerHealthSchemaManager 
= + new ContainerHealthSchemaManager( + getSchemaDefinition(ContainerSchemaDefinition.class), + unhealthyContainersDao); + ReconStorageContainerManagerFacade scmMock = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManagerMock = mock(ContainerManager.class); + StorageContainerServiceProvider scmClientMock = + mock(StorageContainerServiceProvider.class); + ReconContainerMetadataManager reconContainerMetadataManager = + mock(ReconContainerMetadataManager.class); + MockPlacementPolicy placementMock = new MockPlacementPolicy(); + + // Mock container info setup + List mockContainers = getMockContainers(3); + when(scmMock.getContainerManager()).thenReturn(containerManagerMock); + when(scmMock.getScmServiceProvider()).thenReturn(scmClientMock); + when(containerManagerMock.getContainers(any(ContainerID.class), + anyInt())).thenReturn(mockContainers); + for (ContainerInfo c : mockContainers) { + when(containerManagerMock.getContainer( + c.containerID())).thenReturn(c); + when(scmClientMock.getContainerWithPipeline( + c.getContainerID())).thenReturn(new ContainerWithPipeline(c, null)); + when(containerManagerMock.getContainer(c.containerID()) + .getUsedBytes()).thenReturn(Long.valueOf(-10)); + } + + // Verify the table is initially empty + assertThat(unhealthyContainersDao.findAll()).isEmpty(); + + // Setup and start the container health task + ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); + ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); + reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); + ContainerHealthTask containerHealthTask = new ContainerHealthTask( + scmMock.getContainerManager(), scmMock.getScmServiceProvider(), + reconTaskStatusDao, + containerHealthSchemaManager, placementMock, reconTaskConfig, + reconContainerMetadataManager, + new OzoneConfiguration()); + containerHealthTask.start(); + + // Wait for the task to identify unhealthy containers + LambdaTestUtils.await(6000, 1000, + () -> unhealthyContainersDao.count() == 3); + + // Assert that all unhealthy containers have been identified as NEGATIVE_SIZE states + List negativeSizeContainers = + unhealthyContainersDao.fetchByContainerState("NEGATIVE_SIZE"); + assertThat(negativeSizeContainers).hasSize(3); + } + + private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index eff330a796c9..a996f167a1bb 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -18,6 +18,11 @@ package org.apache.hadoop.ozone.recon.tasks; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.mockito.BDDMockito.given; @@ -84,18 +89,21 @@ public void setUp() { @Test public void testProcess() { // mock a container with invalid used bytes - final ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); + ContainerInfo omContainerInfo0 = mock(ContainerInfo.class); given(omContainerInfo0.containerID()).willReturn(new ContainerID(0)); given(omContainerInfo0.getUsedBytes()).willReturn(-1L); + given(omContainerInfo0.getState()).willReturn(OPEN); // Write 2 keys ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(CLOSED); ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(CLOSING); // mock getContainers method to return a list of containers List containers = new ArrayList<>(); @@ -105,8 +113,8 @@ public void testProcess() { task.process(containers); - // Verify 2 containers are in correct bins. - assertEquals(2, containerCountBySizeDao.count()); + // Verify 3 containers are in correct bins. + assertEquals(3, containerCountBySizeDao.count()); // container size upper bound for // 1500000000L (1.5GB) is 2147483648L = 2^31 = 2GB (next highest power of 2) @@ -124,10 +132,11 @@ public void testProcess() { containerCountBySizeDao.findById(recordToFind.value1()).getCount() .longValue()); - // Add a new key + // Add a new container ContainerInfo omContainerInfo3 = mock(ContainerInfo.class); given(omContainerInfo3.containerID()).willReturn(new ContainerID(3)); given(omContainerInfo3.getUsedBytes()).willReturn(1000000000L); // 1GB + given(omContainerInfo3.getState()).willReturn(QUASI_CLOSED); containers.add(omContainerInfo3); // Update existing key. 
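The bin assertions in this test come from rounding each container's used size up to a power-of-two upper bound (ReconUtils.getContainerSizeUpperBound in the production code). A minimal illustrative sketch of that rounding for positive sizes, with the zero-size bucket mirroring the getContainerSizeCountKey change above; deleted and negative-sized containers are handled separately by the task, as the tests here exercise:

  // Illustrative sketch only, not the Recon implementation.
  static long containerSizeUpperBound(long usedBytes) {
    if (usedBytes == 0) {
      return 0L;               // zero-sized containers get their own bucket
    }
    long bound = 1L;
    while (bound < usedBytes) {
      bound <<= 1;             // e.g. 1_500_000_000L -> 2_147_483_648L (2 GB)
    }
    return bound;
  }
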
@@ -137,7 +146,7 @@ public void testProcess() { task.process(containers); // Total size groups added to the database - assertEquals(4, containerCountBySizeDao.count()); + assertEquals(5, containerCountBySizeDao.count()); // Check whether container size upper bound for // 50000L is 536870912L = 2^29 = 512MB (next highest power of 2) @@ -164,4 +173,59 @@ public void testProcess() { .getCount() .longValue()); } + + @Test + public void testProcessDeletedAndNegativeSizedContainers() { + // Create a list of containers, including one that is deleted + ContainerInfo omContainerInfo1 = mock(ContainerInfo.class); + given(omContainerInfo1.containerID()).willReturn(new ContainerID(1)); + given(omContainerInfo1.getUsedBytes()).willReturn(1500000000L); // 1.5GB + given(omContainerInfo1.getState()).willReturn(OPEN); + + ContainerInfo omContainerInfo2 = mock(ContainerInfo.class); + given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); + given(omContainerInfo2.getUsedBytes()).willReturn(2500000000L); // 2.5GB + given(omContainerInfo2.getState()).willReturn(CLOSED); + + ContainerInfo omContainerInfoDeleted = mock(ContainerInfo.class); + given(omContainerInfoDeleted.containerID()).willReturn(new ContainerID(3)); + given(omContainerInfoDeleted.getUsedBytes()).willReturn(1000000000L); + given(omContainerInfoDeleted.getState()).willReturn(DELETED); // 1GB + + // Create a mock container with negative size + final ContainerInfo negativeSizeContainer = mock(ContainerInfo.class); + given(negativeSizeContainer.containerID()).willReturn(new ContainerID(0)); + given(negativeSizeContainer.getUsedBytes()).willReturn(-1L); + given(negativeSizeContainer.getState()).willReturn(OPEN); + + // Create a mock container with negative size and DELETE state + final ContainerInfo negativeSizeDeletedContainer = + mock(ContainerInfo.class); + given(negativeSizeDeletedContainer.containerID()).willReturn( + new ContainerID(0)); + given(negativeSizeDeletedContainer.getUsedBytes()).willReturn(-1L); + given(negativeSizeDeletedContainer.getState()).willReturn(DELETED); + + // Create a mock container with id 1 and updated size of 1GB from 1.5GB + final ContainerInfo validSizeContainer = mock(ContainerInfo.class); + given(validSizeContainer.containerID()).willReturn(new ContainerID(1)); + given(validSizeContainer.getUsedBytes()).willReturn(1000000000L); // 1GB + given(validSizeContainer.getState()).willReturn(CLOSED); + + // Mock getContainers method to return a list of containers including + // both valid and invalid ones + List containers = new ArrayList<>(); + containers.add(omContainerInfo1); + containers.add(omContainerInfo2); + containers.add(omContainerInfoDeleted); + containers.add(negativeSizeContainer); + containers.add(negativeSizeDeletedContainer); + containers.add(validSizeContainer); + + task.process(containers); + + // Verify that only the valid containers are counted + assertEquals(3, containerCountBySizeDao.count()); + } + } From f0b75b7e4ee93e89f9e4fc96cb30d59f78746eb5 Mon Sep 17 00:00:00 2001 From: XiChen <32928346+xichen01@users.noreply.github.com> Date: Fri, 23 Feb 2024 02:04:09 +0800 Subject: [PATCH 042/108] HDDS-10383. 
Introduce a Provider for client-side thread resources passing (#6222) --- .../hadoop/hdds/scm/OzoneClientConfig.java | 15 ++++++ .../hdds/scm/storage/BlockOutputStream.java | 4 +- .../hdds/scm/storage/ECBlockOutputStream.java | 7 ++- .../scm/storage/RatisBlockOutputStream.java | 11 ++-- .../TestBlockOutputStreamCorrectness.java | 5 +- .../ECReconstructionCoordinator.java | 48 +++++++++++------- .../client/io/BlockOutputStreamEntry.java | 19 ++++++- .../client/io/BlockOutputStreamEntryPool.java | 9 ++++ .../client/io/ECBlockOutputStreamEntry.java | 3 +- .../io/ECBlockOutputStreamEntryPool.java | 3 +- .../ozone/client/io/KeyOutputStream.java | 12 +++++ .../hadoop/ozone/client/rpc/RpcClient.java | 50 +++++++++---------- 12 files changed, 131 insertions(+), 55 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 44af34cb919c..65e466529773 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -201,6 +201,13 @@ public enum ChecksumCombineMode { // 3 concurrent stripe read should be enough. private int ecReconstructStripeReadPoolLimit = 10 * 3; + @Config(key = "ec.reconstruct.stripe.write.pool.limit", + defaultValue = "30", + description = "Thread pool max size for parallelly write" + + " available ec chunks to reconstruct the whole stripe.", + tags = ConfigTag.CLIENT) + private int ecReconstructStripeWritePoolLimit = 10 * 3; + @Config(key = "checksum.combine.mode", defaultValue = "COMPOSITE_CRC", description = "The combined checksum type [MD5MD5CRC / COMPOSITE_CRC] " @@ -387,6 +394,14 @@ public int getEcReconstructStripeReadPoolLimit() { return ecReconstructStripeReadPoolLimit; } + public void setEcReconstructStripeWritePoolLimit(int poolLimit) { + this.ecReconstructStripeWritePoolLimit = poolLimit; + } + + public int getEcReconstructStripeWritePoolLimit() { + return ecReconstructStripeWritePoolLimit; + } + public void setFsDefaultBucketLayout(String bucketLayout) { if (!bucketLayout.isEmpty()) { this.fsDefaultBucketLayout = bucketLayout; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index bbc461669584..5ff5da60989e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -28,6 +28,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -145,7 +146,8 @@ public BlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier blockOutputStreamResourceProvider ) throws IOException { this.xceiverClientFactory = xceiverClientManager; this.config = config; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java index 0abc2274bf08..adecc3e4c1e2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ECBlockOutputStream.java @@ -44,6 +44,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync; @@ -75,10 +77,11 @@ public ECBlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier executorServiceSupplier ) throws IOException { super(blockID, xceiverClientManager, - pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs); + pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, executorServiceSupplier); // In EC stream, there will be only one node in pipeline. this.datanodeDetails = pipeline.getClosestNode(); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index b52fc2af9178..6a2758d36486 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -37,6 +37,8 @@ import java.util.Map; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; /** * An {@link OutputStream} used by the REST service in combination with the @@ -65,8 +67,8 @@ public class RatisBlockOutputStream extends BlockOutputStream /** * Creates a new BlockOutputStream. 
* - * @param blockID block ID - * @param bufferPool pool of buffers + * @param blockID block ID + * @param bufferPool pool of buffers */ @SuppressWarnings("checkstyle:ParameterNumber") public RatisBlockOutputStream( @@ -76,10 +78,11 @@ public RatisBlockOutputStream( BufferPool bufferPool, OzoneClientConfig config, Token token, - ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs + ContainerClientMetrics clientMetrics, StreamBufferArgs streamBufferArgs, + Supplier blockOutputStreamResourceProvider ) throws IOException { super(blockID, xceiverClientManager, pipeline, - bufferPool, config, token, clientMetrics, streamBufferArgs); + bufferPool, config, token, clientMetrics, streamBufferArgs, blockOutputStreamResourceProvider); this.commitWatcher = new CommitWatcher(bufferPool, getXceiverClient()); } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 9b061f5392d3..d06c9cf684f4 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -47,6 +47,7 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; +import static java.util.concurrent.Executors.newFixedThreadPool; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -108,7 +109,9 @@ private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) bufferPool, config, null, - ContainerClientMetrics.acquire(), streamBufferArgs); + ContainerClientMetrics.acquire(), + streamBufferArgs, + () -> newFixedThreadPool(10)); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 234439a00c24..a45c15844847 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -35,8 +35,8 @@ import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; -import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.IOUtils; @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.security.token.Token; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,7 +71,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import 
java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -101,12 +101,15 @@ public class ECReconstructionCoordinator implements Closeable { private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; + // TODO: Adjusts to the appropriate value when the ec-reconstruct-writer thread pool is used. + private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 0; + private final ECContainerOperationClient containerOperationClient; private final ByteBufferPool byteBufferPool; - private final ExecutorService ecReconstructExecutor; - + private final ExecutorService ecReconstructReadExecutor; + private final MemoizedSupplier ecReconstructWriteExecutor; private final BlockInputStreamFactory blockInputStreamFactory; private final TokenHelper tokenHelper; private final ContainerClientMetrics clientMetrics; @@ -123,20 +126,18 @@ public ECReconstructionCoordinator( this.containerOperationClient = new ECContainerOperationClient(conf, certificateClient); this.byteBufferPool = new ElasticByteBufferPool(); - ThreadFactory threadFactory = new ThreadFactoryBuilder() - .setNameFormat(threadNamePrefix + "ec-reconstruct-reader-TID-%d") - .build(); ozoneClientConfig = conf.getObject(OzoneClientConfig.class); - this.ecReconstructExecutor = - new ThreadPoolExecutor(EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), - 60, - TimeUnit.SECONDS, - new SynchronousQueue<>(), - threadFactory, - new ThreadPoolExecutor.CallerRunsPolicy()); + this.ecReconstructReadExecutor = createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, + ozoneClientConfig.getEcReconstructStripeReadPoolLimit(), + threadNamePrefix + "ec-reconstruct-reader-TID-%d"); + this.ecReconstructWriteExecutor = MemoizedSupplier.valueOf( + () -> createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE, + ozoneClientConfig.getEcReconstructStripeWritePoolLimit(), + threadNamePrefix + "ec-reconstruct-writer-TID-%d")); this.blockInputStreamFactory = BlockInputStreamFactoryImpl - .getInstance(byteBufferPool, () -> ecReconstructExecutor); + .getInstance(byteBufferPool, () -> ecReconstructReadExecutor); tokenHelper = new TokenHelper(new SecurityConfig(conf), secretKeyClient); this.clientMetrics = ContainerClientMetrics.acquire(); this.metrics = metrics; @@ -232,7 +233,7 @@ private ECBlockOutputStream getECBlockOutputStream( containerOperationClient.singleNodePipeline(datanodeDetails, repConfig, replicaIndex), BufferPool.empty(), ozoneClientConfig, - blockLocationInfo.getToken(), clientMetrics, streamBufferArgs); + blockLocationInfo.getToken(), clientMetrics, streamBufferArgs, ecReconstructWriteExecutor); } @VisibleForTesting @@ -272,7 +273,7 @@ public void reconstructECBlockGroup(BlockLocationInfo blockLocationInfo, repConfig, blockLocationInfo, true, this.containerOperationClient.getXceiverClientManager(), null, this.blockInputStreamFactory, byteBufferPool, - this.ecReconstructExecutor)) { + this.ecReconstructReadExecutor)) { ECBlockOutputStream[] targetBlockStreams = new ECBlockOutputStream[toReconstructIndexes.size()]; @@ -457,6 +458,9 @@ public void close() throws IOException { if (containerOperationClient != null) { containerOperationClient.close(); } + if (ecReconstructWriteExecutor.isInitialized()) { + ecReconstructWriteExecutor.get().shutdownNow(); + } } private Pipeline rebuildInputPipeline(ECReplicationConfig 
repConfig, @@ -590,4 +594,12 @@ OptionalLong getTermOfLeaderSCM() { .map(StateContext::getTermOfLeaderSCM) .orElse(OptionalLong.empty()); } + + private static ExecutorService createThreadPoolExecutor( + int corePoolSize, int maximumPoolSize, String threadNameFormat) { + return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index c0221d07a55e..ba3850ff3947 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -21,6 +21,8 @@ import java.io.OutputStream; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hdds.client.BlockID; @@ -64,6 +66,7 @@ public class BlockOutputStreamEntry extends OutputStream { private final BufferPool bufferPool; private final ContainerClientMetrics clientMetrics; private final StreamBufferArgs streamBufferArgs; + private final Supplier executorServiceSupplier; BlockOutputStreamEntry(Builder b) { this.config = b.config; @@ -78,6 +81,7 @@ public class BlockOutputStreamEntry extends OutputStream { this.bufferPool = b.bufferPool; this.clientMetrics = b.clientMetrics; this.streamBufferArgs = b.streamBufferArgs; + this.executorServiceSupplier = b.executorServiceSupplier; } @Override @@ -104,13 +108,18 @@ void checkStream() throws IOException { */ void createOutputStream() throws IOException { outputStream = new RatisBlockOutputStream(blockID, xceiverClientManager, - pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs); + pipeline, bufferPool, config, token, clientMetrics, streamBufferArgs, + executorServiceSupplier); } ContainerClientMetrics getClientMetrics() { return clientMetrics; } + Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + StreamBufferArgs getStreamBufferArgs() { return streamBufferArgs; } @@ -357,6 +366,7 @@ public static class Builder { private OzoneClientConfig config; private ContainerClientMetrics clientMetrics; private StreamBufferArgs streamBufferArgs; + private Supplier executorServiceSupplier; public Pipeline getPipeline() { return pipeline; @@ -406,15 +416,22 @@ public Builder setToken(Token bToken) { this.token = bToken; return this; } + public Builder setClientMetrics(ContainerClientMetrics clientMetrics) { this.clientMetrics = clientMetrics; return this; } + public Builder setStreamBufferArgs(StreamBufferArgs streamBufferArgs) { this.streamBufferArgs = streamBufferArgs; return this; } + public Builder setExecutorServiceSupplier(Supplier executorServiceSupplier) { + this.executorServiceSupplier = executorServiceSupplier; + return this; + } + public BlockOutputStreamEntry build() { return new BlockOutputStreamEntry(this); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java index 4d6026f92590..51383e8717ab 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntryPool.java @@ -25,6 +25,8 @@ import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.concurrent.ExecutorService; +import java.util.function.Supplier; import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.scm.ContainerClientMetrics; @@ -83,6 +85,7 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware { private final ExcludeList excludeList; private final ContainerClientMetrics clientMetrics; private final StreamBufferArgs streamBufferArgs; + private final Supplier executorServiceSupplier; public BlockOutputStreamEntryPool(KeyOutputStream.Builder b) { this.config = b.getClientConfig(); @@ -109,6 +112,7 @@ public BlockOutputStreamEntryPool(KeyOutputStream.Builder b) { ByteStringConversion .createByteBufferConversion(b.isUnsafeByteBufferConversionEnabled())); this.clientMetrics = b.getClientMetrics(); + this.executorServiceSupplier = b.getExecutorServiceSupplier(); } ExcludeList createExcludeList() { @@ -159,6 +163,7 @@ BlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setToken(subKeyInfo.getToken()) .setClientMetrics(clientMetrics) .setStreamBufferArgs(streamBufferArgs) + .setExecutorServiceSupplier(executorServiceSupplier) .build(); } @@ -229,6 +234,10 @@ StreamBufferArgs getStreamBufferArgs() { return streamBufferArgs; } + public Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + /** * Discards the subsequent pre allocated blocks and removes the streamEntries * from the streamEntries list for the container which is closed. 
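Taken together, the changes in this patch replace eagerly created client thread pools with a single Supplier of ExecutorService that is passed from RpcClient (or the EC reconstruction coordinator) down through the entry pools to each block output stream. A minimal sketch of the pattern in isolation; the class name and pool size below are assumed for illustration and are not part of the patch:

  import java.util.concurrent.ExecutorService;
  import java.util.concurrent.Executors;
  import java.util.function.Supplier;
  import org.apache.ratis.util.MemoizedSupplier;

  class WriteExecutorProviderSketch {
    // Lazily created and shared: no threads are started until a stream
    // actually calls get() on the supplier.
    private final MemoizedSupplier<ExecutorService> writeExecutor =
        MemoizedSupplier.valueOf(() -> Executors.newFixedThreadPool(4));

    Supplier<ExecutorService> provider() {
      return writeExecutor;
    }

    void close() {
      // Shut down only if something actually used the pool, mirroring the
      // isInitialized() checks added in RpcClient and ECReconstructionCoordinator.
      if (writeExecutor.isInitialized()) {
        writeExecutor.get().shutdownNow();
      }
    }
  }
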
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java index 7f6ce87d60c5..241754a57f19 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntry.java @@ -85,7 +85,8 @@ void checkStream() throws IOException { streams[i] = new ECBlockOutputStream(getBlockID(), getXceiverClientManager(), createSingleECBlockPipeline(getPipeline(), nodes.get(i), i + 1), - getBufferPool(), getConf(), getToken(), getClientMetrics(), getStreamBufferArgs()); + getBufferPool(), getConf(), getToken(), getClientMetrics(), getStreamBufferArgs(), + getExecutorServiceSupplier()); } blockOutputStreams = streams; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java index e278097a495a..6eb9aed0d3ad 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECBlockOutputStreamEntryPool.java @@ -48,7 +48,8 @@ ECBlockOutputStreamEntry createStreamEntry(OmKeyLocationInfo subKeyInfo) { .setBufferPool(getBufferPool()) .setToken(subKeyInfo.getToken()) .setClientMetrics(getClientMetrics()) - .setStreamBufferArgs(getStreamBufferArgs()); + .setStreamBufferArgs(getStreamBufferArgs()) + .setExecutorServiceSupplier(getExecutorServiceSupplier()); return b.build(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 9ea17cf8b254..d9e735cd7c8c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -24,7 +24,9 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ExecutorService; import java.util.function.Function; +import java.util.function.Supplier; import java.util.stream.Collectors; import org.apache.hadoop.fs.FSExceptionMessages; @@ -586,6 +588,7 @@ public static class Builder { private ContainerClientMetrics clientMetrics; private boolean atomicKeyCreation = false; private StreamBufferArgs streamBufferArgs; + private Supplier executorServiceSupplier; public String getMultipartUploadID() { return multipartUploadID; @@ -699,6 +702,15 @@ public boolean getAtomicKeyCreation() { return atomicKeyCreation; } + public Builder setExecutorServiceSupplier(Supplier executorServiceSupplier) { + this.executorServiceSupplier = executorServiceSupplier; + return this; + } + + public Supplier getExecutorServiceSupplier() { + return executorServiceSupplier; + } + public KeyOutputStream build() { return new KeyOutputStream(this); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 3e71262040b9..74b22e7ca4c6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -145,6 +145,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.util.MemoizedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -195,6 +196,9 @@ public class RpcClient implements ClientProtocol { // for reconstruction. private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; + // TODO: Adjusts to the appropriate value when the writeThreadPool is used. + private static final int WRITE_POOL_MIN_SIZE = 0; + private final ConfigurationSource conf; private final OzoneManagerClientProtocol ozoneManagerClient; private final XceiverClientFactory xceiverClientManager; @@ -213,8 +217,9 @@ public class RpcClient implements ClientProtocol { private final ByteBufferPool byteBufferPool; private final BlockInputStreamFactory blockInputStreamFactory; private final OzoneManagerVersion omVersion; - private volatile ExecutorService ecReconstructExecutor; + private final MemoizedSupplier ecReconstructExecutor; private final ContainerClientMetrics clientMetrics; + private final MemoizedSupplier writeExecutor; private final AtomicBoolean isS3GRequest = new AtomicBoolean(false); /** @@ -237,6 +242,11 @@ public RpcClient(ConfigurationSource conf, String omServiceId) this.groupRights = aclConfig.getGroupDefaultRights(); this.clientConfig = conf.getObject(OzoneClientConfig.class); + this.ecReconstructExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( + EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, clientConfig.getEcReconstructStripeReadPoolLimit(), + "ec-reconstruct-reader-TID-%d")); + this.writeExecutor = MemoizedSupplier.valueOf(() -> createThreadPoolExecutor( + WRITE_POOL_MIN_SIZE, Integer.MAX_VALUE, "client-write-TID-%d")); OmTransport omTransport = createOmTransport(omServiceId); OzoneManagerProtocolClientSideTranslatorPB @@ -311,7 +321,7 @@ public void onRemoval( }).build(); this.byteBufferPool = new ElasticByteBufferPool(); this.blockInputStreamFactory = BlockInputStreamFactoryImpl - .getInstance(byteBufferPool, this::getECReconstructExecutor); + .getInstance(byteBufferPool, ecReconstructExecutor); this.clientMetrics = ContainerClientMetrics.acquire(); } @@ -1777,9 +1787,11 @@ private OmKeyInfo getKeyInfo(OmKeyArgs keyArgs) throws IOException { @Override public void close() throws IOException { - if (ecReconstructExecutor != null) { - ecReconstructExecutor.shutdownNow(); - ecReconstructExecutor = null; + if (ecReconstructExecutor.isInitialized()) { + ecReconstructExecutor.get().shutdownNow(); + } + if (writeExecutor.isInitialized()) { + writeExecutor.get().shutdownNow(); } IOUtils.cleanupWithLogger(LOG, ozoneManagerClient, xceiverClientManager); keyProviderCache.invalidateAll(); @@ -2400,6 +2412,7 @@ private KeyOutputStream.Builder createKeyOutputStream( .setConfig(clientConfig) .setAtomicKeyCreation(isS3GRequest.get()) .setClientMetrics(clientMetrics) + .setExecutorServiceSupplier(writeExecutor) .setStreamBufferArgs(streamBufferArgs); } @@ -2521,26 +2534,11 @@ public void setTimes(OzoneObj obj, String keyName, long mtime, long atime) ozoneManagerClient.setTimes(builder.build(), mtime, atime); } - public ExecutorService getECReconstructExecutor() { - // local ref to a volatile to ensure access - // to a completed initialized object - ExecutorService executor = ecReconstructExecutor; - if (executor == null) { - synchronized (this) { - executor = 
ecReconstructExecutor; - if (executor == null) { - ecReconstructExecutor = new ThreadPoolExecutor( - EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE, - clientConfig.getEcReconstructStripeReadPoolLimit(), - 60, TimeUnit.SECONDS, new SynchronousQueue<>(), - new ThreadFactoryBuilder() - .setNameFormat("ec-reconstruct-reader-TID-%d") - .build(), - new ThreadPoolExecutor.CallerRunsPolicy()); - executor = ecReconstructExecutor; - } - } - } - return executor; + private static ExecutorService createThreadPoolExecutor( + int corePoolSize, int maximumPoolSize, String threadNameFormat) { + return new ThreadPoolExecutor(corePoolSize, maximumPoolSize, + 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat(threadNameFormat).build(), + new ThreadPoolExecutor.CallerRunsPolicy()); } } From 6dfd7d46d1404b3ccb981db9af44a0b0f2fcda66 Mon Sep 17 00:00:00 2001 From: Aswin Shakil Balasubramanian Date: Thu, 22 Feb 2024 10:44:58 -0800 Subject: [PATCH 043/108] HDDS-10408. NPE causes OM crash in Snapshot Purge request (#6250) --- .../om/request/snapshot/OMSnapshotPurgeRequest.java | 11 +++++++++++ .../hadoop/ozone/om/snapshot/SnapshotUtils.java | 5 +++++ 2 files changed, 16 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 0fa9087e25e7..3f4d746adb54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -35,6 +35,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.HashMap; @@ -49,6 +51,8 @@ */ public class OMSnapshotPurgeRequest extends OMClientRequest { + private static final Logger LOG = LoggerFactory.getLogger(OMSnapshotPurgeRequest.class); + public OMSnapshotPurgeRequest(OMRequest omRequest) { super(omRequest); } @@ -83,6 +87,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable() .get(snapTableKey); + if (fromSnapshot == null) { + // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. 
+ LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + + "Snapshot purge request.", snapTableKey); + continue; + } + SnapshotInfo nextSnapshot = SnapshotUtils .getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index 89823995d0cd..2041fa791a76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -40,6 +40,7 @@ import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; @@ -148,6 +149,10 @@ public static SnapshotInfo getNextActiveSnapshot(SnapshotInfo snapInfo, // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot // is removed in-memory but OMDoubleBuffer has not flushed yet. + if (snapInfo == null) { + throw new OMException("Snapshot Info is null. Cannot get the next snapshot", INVALID_SNAPSHOT_ERROR); + } + try { while (chainManager.hasNextPathSnapshot(snapInfo.getSnapshotPath(), snapInfo.getSnapshotId())) { From b537a6a710e83a413cfba87c02e0357c0365d114 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 22 Feb 2024 21:00:44 +0100 Subject: [PATCH 044/108] HDDS-10396. 
Encapsulate fields in WithMetadata and subclasses (#6238) --- .../hadoop/ozone/client/OzoneBucket.java | 2 +- .../hadoop/ozone/client/OzoneVolume.java | 2 +- .../hadoop/ozone/om/helpers/OmBucketArgs.java | 4 +- .../hadoop/ozone/om/helpers/OmBucketInfo.java | 32 ++++++------ .../ozone/om/helpers/OmDirectoryInfo.java | 40 +++++++-------- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 51 ++++++++----------- .../ozone/om/helpers/OmMultipartKeyInfo.java | 10 ++-- .../hadoop/ozone/om/helpers/OmVolumeArgs.java | 22 ++++---- .../hadoop/ozone/om/helpers/WithMetadata.java | 9 ++-- .../hadoop/ozone/om/helpers/WithObjectID.java | 45 +++++++--------- .../ozone/om/helpers/WithParentObjectId.java | 10 ++-- .../hadoop/ozone/om/helpers/OmPrefixInfo.java | 32 ++++++------ 12 files changed, 119 insertions(+), 140 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index ca885b3b6b06..dc7d6cf0a717 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -154,7 +154,7 @@ public class OzoneBucket extends WithMetadata { private String owner; protected OzoneBucket(Builder builder) { - this.metadata = builder.metadata; + setMetadata(builder.metadata); this.proxy = builder.proxy; this.volumeName = builder.volumeName; this.name = builder.name; // bucket name diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 47b50c042a27..9c489943720c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -106,7 +106,7 @@ public class OzoneVolume extends WithMetadata { private long refCount; protected OzoneVolume(Builder builder) { - this.metadata = builder.metadata; + setMetadata(builder.metadata); this.proxy = builder.proxy; this.name = builder.name; this.admin = builder.admin; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index e382377dff45..55d05dccd755 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -80,7 +80,7 @@ private OmBucketArgs(String volumeName, String bucketName, this.bucketName = bucketName; this.isVersionEnabled = isVersionEnabled; this.storageType = storageType; - this.metadata = metadata; + setMetadata(metadata); this.ownerName = ownerName; } @@ -206,7 +206,7 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.VOLUME, this.volumeName); auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); + getMetadata().get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, String.valueOf(this.isVersionEnabled)); if (this.storageType != null) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index cc811053eb27..a1023d555c6b 100644 --- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -158,9 +158,9 @@ private OmBucketInfo(String volumeName, this.storageType = storageType; this.creationTime = creationTime; this.modificationTime = modificationTime; - this.objectID = objectID; - this.updateID = updateID; - this.metadata = metadata; + setObjectID(objectID); + setUpdateID(updateID); + setMetadata(metadata); this.bekInfo = bekInfo; this.sourceVolume = sourceVolume; this.sourceBucket = sourceBucket; @@ -351,7 +351,7 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.BUCKET, this.bucketName); auditMap.put(OzoneConsts.BUCKET_LAYOUT, String.valueOf(this.bucketLayout)); auditMap.put(OzoneConsts.GDPR_FLAG, - this.metadata.get(OzoneConsts.GDPR_FLAG)); + getMetadata().get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.ACLS, (this.acls != null) ? this.acls.toString() : null); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, @@ -403,13 +403,13 @@ public Builder toBuilder() { .setIsVersionEnabled(isVersionEnabled) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setBucketEncryptionKey(bekInfo) .setSourceVolume(sourceVolume) .setSourceBucket(sourceBucket) .setAcls(acls) - .addAllMetadata(metadata) + .addAllMetadata(getMetadata()) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) .setQuotaInBytes(quotaInBytes) @@ -607,11 +607,11 @@ public BucketInfo getProtobuf() { .setStorageType(storageType.toProto()) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setUsedBytes(usedBytes) .setUsedNamespace(usedNamespace) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .setQuotaInBytes(quotaInBytes) .setQuotaInNamespace(quotaInNamespace); if (bucketLayout != null) { @@ -739,13 +739,13 @@ public boolean equals(Object o) { Objects.equals(acls, that.acls) && Objects.equals(isVersionEnabled, that.isVersionEnabled) && storageType == that.storageType && - objectID == that.objectID && - updateID == that.updateID && + getObjectID() == that.getObjectID() && + getUpdateID() == that.getUpdateID() && usedBytes == that.usedBytes && usedNamespace == that.usedNamespace && Objects.equals(sourceVolume, that.sourceVolume) && Objects.equals(sourceBucket, that.sourceBucket) && - Objects.equals(metadata, that.metadata) && + Objects.equals(getMetadata(), that.getMetadata()) && Objects.equals(bekInfo, that.bekInfo) && Objects.equals(owner, that.owner) && Objects.equals(defaultReplicationConfig, that.defaultReplicationConfig); @@ -768,9 +768,9 @@ public String toString() { ", bekInfo=" + bekInfo + ", sourceVolume='" + sourceVolume + "'" + ", sourceBucket='" + sourceBucket + "'" + - ", objectID=" + objectID + - ", updateID=" + updateID + - ", metadata=" + metadata + + ", objectID=" + getObjectID() + + ", updateID=" + getUpdateID() + + ", metadata=" + getMetadata() + ", usedBytes=" + usedBytes + ", usedNamespace=" + usedNamespace + ", quotaInBytes=" + quotaInBytes + diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java index 3d1940bd7ce2..69ea2fc5b85b 100644 --- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java @@ -58,10 +58,10 @@ public static Codec getCodec() { public OmDirectoryInfo(Builder builder) { this.name = builder.name; this.acls = builder.acls; - this.metadata = builder.metadata; - this.objectID = builder.objectID; - this.updateID = builder.updateID; - this.parentObjectID = builder.parentObjectID; + setMetadata(builder.metadata); + setObjectID(builder.objectID); + setUpdateID(builder.updateID); + setParentObjectID(builder.parentObjectID); this.creationTime = builder.creationTime; this.modificationTime = builder.modificationTime; } @@ -164,10 +164,6 @@ public String toString() { return getPath() + ":" + getObjectID(); } - public long getParentObjectID() { - return parentObjectID; - } - public String getPath() { return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName(); } @@ -196,10 +192,10 @@ public DirectoryInfo getProtobuf() { DirectoryInfo.newBuilder().setName(name) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentID(parentObjectID); + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentID(getParentObjectID()); if (acls != null) { pib.addAllAcls(OzoneAclUtil.toProtobuf(acls)); } @@ -245,16 +241,16 @@ public boolean equals(Object o) { return creationTime == omDirInfo.creationTime && modificationTime == omDirInfo.modificationTime && name.equals(omDirInfo.name) && - Objects.equals(metadata, omDirInfo.metadata) && + Objects.equals(getMetadata(), omDirInfo.getMetadata()) && Objects.equals(acls, omDirInfo.acls) && - objectID == omDirInfo.objectID && - updateID == omDirInfo.updateID && - parentObjectID == omDirInfo.parentObjectID; + getObjectID() == omDirInfo.getObjectID() && + getUpdateID() == omDirInfo.getUpdateID() && + getParentObjectID() == omDirInfo.getParentObjectID(); } @Override public int hashCode() { - return Objects.hash(objectID, parentObjectID, name); + return Objects.hash(getObjectID(), getParentObjectID(), name); } /** @@ -266,16 +262,16 @@ public OmDirectoryInfo copyObject() { .setName(name) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .setParentObjectID(parentObjectID) - .setObjectID(objectID) - .setUpdateID(updateID); + .setParentObjectID(getParentObjectID()) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()); acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), acl.getName(), (BitSet) acl.getAclBitSet().clone(), acl.getAclScope()))); - if (metadata != null) { - builder.addAllMetadata(metadata); + if (getMetadata() != null) { + builder.addAllMetadata(getMetadata()); } return builder.build(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index c3a1a4a3d77b..b2297accf85e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -116,11 +116,11 @@ public static Codec getCodec(boolean ignorePipeline) { this.creationTime = creationTime; this.modificationTime = modificationTime; this.replicationConfig = replicationConfig; - this.metadata = metadata; + 
setMetadata(metadata); this.encInfo = encInfo; this.acls = acls; - this.objectID = objectID; - this.updateID = updateID; + setObjectID(objectID); + setUpdateID(updateID); this.fileChecksum = fileChecksum; } @@ -137,7 +137,7 @@ public static Codec getCodec(boolean ignorePipeline) { creationTime, modificationTime, replicationConfig, metadata, encInfo, acls, objectID, updateID, fileChecksum); this.fileName = fileName; - this.parentObjectID = parentObjectID; + setParentObjectID(parentObjectID); this.isFile = isFile; } @@ -181,11 +181,6 @@ public String getFileName() { return fileName; } - public long getParentObjectID() { - return parentObjectID; - } - - public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { return keyLocationVersions.size() == 0 ? null : keyLocationVersions.get(keyLocationVersions.size() - 1); @@ -213,7 +208,7 @@ public boolean isFile() { } public boolean isHsync() { - return metadata.containsKey(OzoneConsts.HSYNC_CLIENT_ID); + return getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID); } /** @@ -413,10 +408,6 @@ public boolean setAcls(List newAcls) { return OzoneAclUtil.setAcl(acls, newAcls); } - public void setParentObjectID(long parentObjectID) { - this.parentObjectID = parentObjectID; - } - public void setReplicationConfig(ReplicationConfig repConfig) { this.replicationConfig = repConfig; } @@ -674,11 +665,11 @@ private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName, .addAllKeyLocationList(keyLocations) .setCreationTime(creationTime) .setModificationTime(modificationTime) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .addAllAcls(OzoneAclUtil.toProtobuf(acls)) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentID(parentObjectID); + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentID(getParentObjectID()); FileChecksumProto fileChecksumProto = OMPBHelper.convert(fileChecksum); if (fileChecksumProto != null) { @@ -753,8 +744,8 @@ public String getObjectInfo() { ", key='" + keyName + '\'' + ", dataSize='" + dataSize + '\'' + ", creationTime='" + creationTime + '\'' + - ", objectID='" + objectID + '\'' + - ", parentID='" + parentObjectID + '\'' + + ", objectID='" + getObjectID() + '\'' + + ", parentID='" + getParentObjectID() + '\'' + ", replication='" + replicationConfig + '\'' + ", fileChecksum='" + fileChecksum + '}'; @@ -770,12 +761,12 @@ public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, volumeName.equals(omKeyInfo.volumeName) && bucketName.equals(omKeyInfo.bucketName) && replicationConfig.equals(omKeyInfo.replicationConfig) && - Objects.equals(metadata, omKeyInfo.metadata) && + Objects.equals(getMetadata(), omKeyInfo.getMetadata()) && Objects.equals(acls, omKeyInfo.acls) && - objectID == omKeyInfo.objectID; + getObjectID() == omKeyInfo.getObjectID(); if (isEqual && checkUpdateID) { - isEqual = updateID == omKeyInfo.updateID; + isEqual = getUpdateID() == omKeyInfo.getUpdateID(); } if (isEqual && checkModificationTime) { @@ -783,7 +774,7 @@ public boolean isKeyInfoSame(OmKeyInfo omKeyInfo, boolean checkPath, } if (isEqual && checkPath) { - isEqual = parentObjectID == omKeyInfo.parentObjectID && + isEqual = getParentObjectID() == omKeyInfo.getParentObjectID() && keyName.equals(omKeyInfo.keyName); } @@ -808,7 +799,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(volumeName, bucketName, keyName, parentObjectID); + return Objects.hash(volumeName, bucketName, keyName, 
getParentObjectID()); } /** @@ -825,9 +816,9 @@ public OmKeyInfo copyObject() { .setDataSize(dataSize) .setReplicationConfig(replicationConfig) .setFileEncryptionInfo(encInfo) - .setObjectID(objectID) - .setUpdateID(updateID) - .setParentObjectID(parentObjectID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) + .setParentObjectID(getParentObjectID()) .setFileName(fileName) .setFile(isFile); @@ -841,8 +832,8 @@ public OmKeyInfo copyObject() { acl.getName(), (BitSet) acl.getAclBitSet().clone(), acl.getAclScope()))); - if (metadata != null) { - metadata.forEach((k, v) -> builder.addMetadata(k, v)); + if (getMetadata() != null) { + getMetadata().forEach((k, v) -> builder.addMetadata(k, v)); } if (fileChecksum != null) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 4f57e075bd70..90b6301437ca 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -170,8 +170,8 @@ private OmMultipartKeyInfo(String id, long creationTime, this.creationTime = creationTime; this.replicationConfig = replicationConfig; this.partKeyInfoMap = sortedMap; - this.objectID = objectID; - this.updateID = updateID; + setObjectID(objectID); + setUpdateID(updateID); this.parentID = parentObjId; } @@ -323,8 +323,8 @@ public MultipartKeyInfo getProto() { .setUploadID(uploadID) .setCreationTime(creationTime) .setType(replicationConfig.getReplicationType()) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setParentID(parentID); if (replicationConfig instanceof ECReplicationConfig) { @@ -362,7 +362,7 @@ public OmMultipartKeyInfo copyObject() { // is added, it returns a new shallow copy of the PartKeyInfoMap Object // so here we can directly pass in partKeyInfoMap return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoMap, objectID, updateID, parentID); + partKeyInfoMap, getObjectID(), getUpdateID(), parentID); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index c5c8f5ca8e2b..1a12d4ddb38f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -102,12 +102,12 @@ private OmVolumeArgs(String adminName, String ownerName, String volume, this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; this.usedNamespace = usedNamespace; - this.metadata = metadata; + setMetadata(metadata); this.acls = acls; this.creationTime = creationTime; this.modificationTime = modificationTime; - this.objectID = objectID; - this.updateID = updateID; + setObjectID(objectID); + setUpdateID(updateID); this.refCount = refCount; } @@ -286,12 +286,12 @@ public boolean equals(Object o) { return false; } OmVolumeArgs that = (OmVolumeArgs) o; - return Objects.equals(this.objectID, that.objectID); + return Objects.equals(this.getObjectID(), that.getObjectID()); } @Override public int hashCode() { - return Objects.hash(this.objectID); + return Objects.hash(getObjectID()); } /** @@ -430,13 +430,13 @@ public VolumeInfo getProtobuf() { 
.setQuotaInBytes(quotaInBytes) .setQuotaInNamespace(quotaInNamespace) .setUsedNamespace(usedNamespace) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) .addAllVolumeAcls(aclList) .setCreationTime( creationTime == 0 ? System.currentTimeMillis() : creationTime) .setModificationTime(modificationTime) - .setObjectID(objectID) - .setUpdateID(updateID) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()) .setRefCount(refCount) .build(); } @@ -476,8 +476,8 @@ public String getObjectInfo() { @Override public OmVolumeArgs copyObject() { Map cloneMetadata = new HashMap<>(); - if (metadata != null) { - metadata.forEach((k, v) -> cloneMetadata.put(k, v)); + if (getMetadata() != null) { + getMetadata().forEach((k, v) -> cloneMetadata.put(k, v)); } List cloneAcls = new ArrayList(acls.size()); @@ -488,6 +488,6 @@ public OmVolumeArgs copyObject() { return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, quotaInNamespace, usedNamespace, cloneMetadata, cloneAcls, - creationTime, modificationTime, objectID, updateID, refCount); + creationTime, modificationTime, getObjectID(), getUpdateID(), refCount); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java index 5c49a15a12bf..cc190399a7da 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithMetadata.java @@ -23,22 +23,21 @@ /** * Mixin class to handle custom metadata. */ -public class WithMetadata { +public abstract class WithMetadata { - @SuppressWarnings("visibilitymodifier") - protected Map metadata = new HashMap<>(); + private Map metadata = new HashMap<>(); /** * Custom key value metadata. */ - public Map getMetadata() { + public final Map getMetadata() { return metadata; } /** * Set custom key value metadata. */ - public void setMetadata(Map metadata) { + public final void setMetadata(Map metadata) { this.metadata = metadata; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java index 0ea1a1c0e6a7..fb677871fab6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java @@ -22,34 +22,24 @@ /** * Mixin class to handle ObjectID and UpdateID. */ -public class WithObjectID extends WithMetadata { +public abstract class WithObjectID extends WithMetadata { + + private long objectID; + private long updateID; /** * ObjectIDs are unique and immutable identifier for each object in the * System. */ - @SuppressWarnings("visibilitymodifier") - protected long objectID; - /** - * UpdateIDs are monotonically increasing values which are updated - * each time there is an update. - */ - @SuppressWarnings("visibilitymodifier") - protected long updateID; - - /** - * Returns objectID. - * @return long - */ - public long getObjectID() { + public final long getObjectID() { return objectID; } /** - * Returns updateID. - * @return long + * UpdateIDs are monotonically increasing values which are updated + * each time there is an update. 
*/ - public long getUpdateID() { + public final long getUpdateID() { return updateID; } @@ -62,7 +52,7 @@ public long getUpdateID() { * * @param obId - long */ - public void setObjectID(long obId) { + public final void setObjectID(long obId) { if (this.objectID != 0 && obId != OBJECT_ID_RECLAIM_BLOCKS) { throw new UnsupportedOperationException("Attempt to modify object ID " + "which is not zero. Current Object ID is " + this.objectID); @@ -76,7 +66,7 @@ public void setObjectID(long obId) { * @param updateId long * @param isRatisEnabled boolean */ - public void setUpdateID(long updateId, boolean isRatisEnabled) { + public final void setUpdateID(long updateId, boolean isRatisEnabled) { // Because in non-HA, we have multiple rpc handler threads and // transactionID is generated in OzoneManagerServerSideTranslatorPB. @@ -103,21 +93,22 @@ public void setUpdateID(long updateId, boolean isRatisEnabled) { // Main reason, in non-HA transaction Index after restart starts from 0. // And also because of this same reason we don't do replay checks in non-HA. - if (isRatisEnabled && updateId < this.updateID) { + if (isRatisEnabled && updateId < this.getUpdateID()) { throw new IllegalArgumentException(String.format( "Trying to set updateID to %d which is not greater than the " + - "current value of %d for %s", updateId, this.updateID, + "current value of %d for %s", updateId, this.getUpdateID(), getObjectInfo())); } - this.updateID = updateId; - } - - public boolean isUpdateIDset() { - return this.updateID > 0; + this.setUpdateID(updateId); } + /** Hook method, customized in subclasses. */ public String getObjectInfo() { return this.toString(); } + + public final void setUpdateID(long updateID) { + this.updateID = updateID; + } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java index 79a135af1726..b92b34e8e3bd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithParentObjectId.java @@ -22,6 +22,8 @@ * Object ID with additional parent ID field. */ public class WithParentObjectId extends WithObjectID { + private long parentObjectID; + /** * Object ID with additional parent ID field. 
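As an aside on the accessor contract above: the following is a minimal, hypothetical sketch of how the final setters introduced by this hunk are expected to behave. The DemoWithObjectID subclass and the literal values are illustrative only, and the sketch assumes WithObjectID declares no abstract members beyond what is visible in this diff.

import org.apache.hadoop.ozone.om.helpers.WithObjectID;

/** Hypothetical subclass purely for illustration; not part of this patch. */
class DemoWithObjectID extends WithObjectID {
  public static void main(String[] args) {
    DemoWithObjectID demo = new DemoWithObjectID();
    demo.setObjectID(1025L);     // allowed once, because the current objectID is still 0
    demo.setUpdateID(7L, true);  // allowed, 7 is greater than the current updateID of 0
    demo.setUpdateID(3L, true);  // with Ratis enabled this throws IllegalArgumentException
  }
}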
* @@ -45,11 +47,11 @@ public class WithParentObjectId extends WithObjectID { * key1 | 1026 | 1025 | * ------------------------------------------| */ - @SuppressWarnings("visibilitymodifier") - protected long parentObjectID; - - public long getParentObjectID() { + public final long getParentObjectID() { return parentObjectID; } + public final void setParentObjectID(long parentObjectID) { + this.parentObjectID = parentObjectID; + } } diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java index 4cc76868f745..d6fb140659c7 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java @@ -55,9 +55,9 @@ public OmPrefixInfo(String name, List acls, Map metadata, long objectId, long updateId) { this.name = name; this.acls = acls; - this.metadata = metadata; - this.objectID = objectId; - this.updateID = updateId; + setMetadata(metadata); + setObjectID(objectId); + setUpdateID(updateId); } /** @@ -164,9 +164,9 @@ public OmPrefixInfo build() { public PersistedPrefixInfo getProtobuf() { PersistedPrefixInfo.Builder pib = PersistedPrefixInfo.newBuilder().setName(name) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) - .setObjectID(objectID) - .setUpdateID(updateID); + .addAllMetadata(KeyValueUtil.toProtobuf(getMetadata())) + .setObjectID(getObjectID()) + .setUpdateID(getUpdateID()); if (acls != null) { pib.addAllAcls(OzoneAclStorageUtil.toProtobuf(acls)); } @@ -210,14 +210,14 @@ public boolean equals(Object o) { OmPrefixInfo that = (OmPrefixInfo) o; return name.equals(that.name) && Objects.equals(acls, that.acls) && - Objects.equals(metadata, that.metadata) && - objectID == that.objectID && - updateID == that.updateID; + Objects.equals(getMetadata(), that.getMetadata()) && + getObjectID() == that.getObjectID() && + getUpdateID() == that.getUpdateID(); } @Override public int hashCode() { - return Objects.hash(name, acls, metadata, objectID, updateID); + return Objects.hash(name, acls, getMetadata(), getObjectID(), getUpdateID()); } @Override @@ -225,9 +225,9 @@ public String toString() { return "OmPrefixInfo{" + "name='" + name + '\'' + ", acls=" + acls + - ", metadata=" + metadata + - ", objectID=" + objectID + - ", updateID=" + updateID + + ", metadata=" + getMetadata() + + ", objectID=" + getObjectID() + + ", updateID=" + getUpdateID() + '}'; } @@ -241,10 +241,10 @@ public OmPrefixInfo copyObject() { .collect(Collectors.toList()); Map metadataList = new HashMap<>(); - if (metadata != null) { - metadata.forEach((k, v) -> metadataList.put(k, v)); + if (getMetadata() != null) { + getMetadata().forEach((k, v) -> metadataList.put(k, v)); } - return new OmPrefixInfo(name, aclList, metadataList, objectID, updateID); + return new OmPrefixInfo(name, aclList, metadataList, getObjectID(), getUpdateID()); } } From d883d7df4600821f92b884c18ba960976d589138 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran <47532440+swamirishi@users.noreply.github.com> Date: Thu, 22 Feb 2024 14:23:22 -0800 Subject: [PATCH 045/108] HDDS-10149. 
New JNI layer for RawSSTFileReader & RawSSTFileReaderIterator (#6182) --- .../src/main/resources/ozone-default.xml | 18 - .../db/managed/JniLibNamePropertyWriter.java | 48 + hadoop-hdds/rocks-native/pom.xml | 205 +--- hadoop-hdds/rocks-native/src/CMakeLists.txt | 36 +- .../hadoop/hdds/utils/NativeConstants.java | 3 +- .../db/managed/ManagedRawSSTFileIterator.java | 118 ++ .../db/managed/ManagedRawSSTFileReader.java | 78 ++ .../db/managed/ManagedSSTDumpIterator.java | 314 ----- .../utils/db/managed/ManagedSSTDumpTool.java | 99 -- .../utils/db/managed/PipeInputStream.java | 88 -- .../main/native/ManagedRawSSTFileIterator.cpp | 90 ++ .../main/native/ManagedRawSSTFileReader.cpp | 65 + .../rocks-native/src/main/native/Pipe.cpp | 37 - .../rocks-native/src/main/native/Pipe.h | 55 - .../src/main/native/PipeInputStream.cpp | 48 - .../src/main/native/SSTDumpTool.cpp | 48 - .../main/native/cplusplus_to_java_convert.h | 2 +- .../src/main/patches/rocks-native.patch | 1085 ++++++++--------- .../hdds/utils/TestNativeLibraryLoader.java | 33 +- .../TestManagedRawSSTFileIterator.java | 143 +++ .../managed/TestManagedSSTDumpIterator.java | 282 ----- .../src/test/resources/auditlog.properties | 76 ++ .../src/test/resources/log4j.properties | 23 + .../ozone/rocksdb/util/SstFileSetReader.java | 209 ++-- .../rocksdb/util/TestSstFileSetReader.java | 62 +- .../apache/hadoop/ozone/om/OMConfigKeys.java | 12 - hadoop-ozone/dev-support/checks/native.sh | 17 +- .../TestOmSnapshotFsoWithNativeLib.java | 2 - .../om/snapshot/SnapshotDiffManager.java | 59 +- .../om/snapshot/TestSnapshotDiffManager.java | 26 +- pom.xml | 20 +- 31 files changed, 1396 insertions(+), 2005 deletions(-) create mode 100644 hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java create mode 100644 hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java create mode 100644 hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java delete mode 100644 hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java delete mode 100644 hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java delete mode 100644 hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java create mode 100644 hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp create mode 100644 hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp delete mode 100644 hadoop-hdds/rocks-native/src/main/native/Pipe.cpp delete mode 100644 hadoop-hdds/rocks-native/src/main/native/Pipe.h delete mode 100644 hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp delete mode 100644 hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp create mode 100644 hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java delete mode 100644 hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java create mode 100644 hadoop-hdds/rocks-native/src/test/resources/auditlog.properties create mode 100644 hadoop-hdds/rocks-native/src/test/resources/log4j.properties diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index bfb0547caf60..251ad5c2ff30 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -4273,15 +4273,6 @@
- - ozone.om.snapshot.sst_dumptool.pool.size - 1 - OZONE, OM - - Threadpool size for SST Dumptool which would be used for computing snapdiff when native library is enabled. - - - ozone.om.snapshot.load.native.lib true @@ -4291,15 +4282,6 @@ - - ozone.om.snapshot.sst_dumptool.buffer.size - 8KB - OZONE, OM - - Buffer size for SST Dumptool Pipe which would be used for computing snapdiff when native library is enabled. - - - ozone.om.snapshot.diff.max.allowed.keys.changed.per.job 10000000 diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java new file mode 100644 index 000000000000..46fbeb412a84 --- /dev/null +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/JniLibNamePropertyWriter.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.rocksdb.util.Environment; + +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; + +/** + * Class to write the rocksdb lib name to a file. + * This would be used to build native ozone_rocksdb_tools library. + */ +public final class JniLibNamePropertyWriter { + + private JniLibNamePropertyWriter() { + } + + public static void main(String[] args) { + String filePath = args[0]; + try (Writer writer = new OutputStreamWriter( + Files.newOutputStream(Paths.get(filePath)), StandardCharsets.UTF_8)) { + writer.write("rocksdbLibName=" + + Environment.getJniLibraryFileName("rocksdb")); + } catch (IOException e) { + e.printStackTrace(); + } + } +} diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index c12ddbb091bd..2146da977ffd 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -55,8 +55,6 @@ 8 8 - https://sourceware.org/pub/bzip2/bzip2-${bzip2.version}.tar.gz - https://zlib.net/fossils/zlib-${zlib.version}.tar.gz @@ -113,79 +111,80 @@ - com.googlecode.maven-download-plugin - download-maven-plugin + org.codehaus.mojo + exec-maven-plugin - rocksdb source download - generate-sources - - wget - - - https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz - rocksdb-v${rocksdb.version}.tar.gz - ${project.build.directory}/rocksdb - - - - zlib source download - generate-sources - - wget - - - ${zlib.url} - zlib-${zlib.version}.tar.gz - ${project.build.directory}/zlib - - - - bzip2 source download - generate-sources + set-property + initialize - wget + java - ${bzip2.url} - bzip2-v${bzip2.version}.tar.gz - ${project.build.directory}/bzip2 + org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter + + ${project.build.directory}/propertyFile.txt + + + + + org.codehaus.mojo + properties-maven-plugin + - lz4 source download - generate-sources + read-property-from-file + initialize - wget + read-project-properties - https://github.com/lz4/lz4/archive/refs/tags/v${lz4.version}.tar.gz - lz4-v${lz4.version}.tar.gz - ${project.build.directory}/lz4 + + ${project.build.directory}/propertyFile.txt + + + + + org.apache.maven.plugins + maven-dependency-plugin + - snappy source download - generate-sources + unpack-dependency + initialize - wget + unpack - https://github.com/google/snappy/archive/refs/tags/${snappy.version}.tar.gz - snappy-v${snappy.version}.tar.gz - ${project.build.directory}/snappy + + + org.rocksdb + rocksdbjni + jar + false + ${project.build.directory}/rocksdbjni + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + - zstd source download + rocksdb source download generate-sources wget - https://github.com/facebook/zstd/archive/refs/tags/v${zstd.version}.tar.gz - zstd-v${zstd.version}.tar.gz - ${project.build.directory}/zstd + https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz + rocksdb-v${rocksdb.version}.tar.gz + ${project.build.directory}/rocksdb @@ -219,89 +218,6 @@ - - - - - - - - - - run - - - - build-zlib - process-sources - - 
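To make the surrounding build wiring concrete, here is a hedged sketch of driving the JniLibNamePropertyWriter introduced earlier in this change. The output path and the sample property value are assumptions: in the actual build the exec-maven-plugin passes ${project.build.directory}/propertyFile.txt, and the value is whatever RocksDB's Environment reports for the current platform.

import org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter;

public class JniLibNamePropertyWriterDemo {
  public static void main(String[] args) {
    // Writes a single property, e.g. rocksdbLibName=librocksdbjni-linux64.so on a
    // 64-bit Linux host (the exact name depends on OS and architecture).
    JniLibNamePropertyWriter.main(new String[] {"target/propertyFile.txt"});
    // The properties-maven-plugin then reads this file back so the native
    // ozone_rocksdb_tools build can locate the unpacked rocksdbjni shared library by name.
  }
}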
- - - - - - - - - - run - - - - build-bzip2 - process-sources - - - - - - - - - run - - - - build-lz4 - process-sources - - - - - - - - - run - - - - build-zstd - process-sources - - - - - - - - - run - - - - build-snappy - process-sources - - - - - - - - - @@ -320,10 +236,9 @@ - - + - + @@ -346,14 +261,12 @@ - - - - - - + + + @@ -423,8 +336,8 @@ ${env.JAVA_HOME}/bin/javah - org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool - org.apache.hadoop.hdds.utils.db.managed.PipeInputStream + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader + org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator ${project.build.directory}/native/javah @@ -479,8 +392,8 @@ ${project.build.outputDirectory}:${project.build.directory}/dependency/* -h ${project.build.directory}/native/javah - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java - ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java + ${project.basedir}/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReaderIterator.java diff --git a/hadoop-hdds/rocks-native/src/CMakeLists.txt b/hadoop-hdds/rocks-native/src/CMakeLists.txt index 051660777493..4639e2a8c927 100644 --- a/hadoop-hdds/rocks-native/src/CMakeLists.txt +++ b/hadoop-hdds/rocks-native/src/CMakeLists.txt @@ -21,6 +21,7 @@ # cmake_minimum_required(VERSION 2.8) +add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC") project(ozone_native) @@ -36,43 +37,18 @@ endif() include_directories(${GENERATED_JAVAH}) if(${SST_DUMP_INCLUDE}) include_directories(${ROCKSDB_HEADERS}) - set(SOURCE_FILES ${NATIVE_DIR}/SSTDumpTool.cpp ${NATIVE_DIR}/PipeInputStream.cpp ${NATIVE_DIR}/Pipe.h ${NATIVE_DIR}/Pipe.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) - ADD_LIBRARY(rocksdb STATIC IMPORTED) + set(SOURCE_FILES ${NATIVE_DIR}/ManagedRawSSTFileReader.cpp ${NATIVE_DIR}/ManagedRawSSTFileIterator.cpp ${NATIVE_DIR}/cplusplus_to_java_convert.h) + ADD_LIBRARY(rocksdb SHARED IMPORTED) set_target_properties( rocksdb PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb.a) + IMPORTED_LOCATION ${ROCKSDB_LIB}) ADD_LIBRARY(rocks_tools STATIC IMPORTED) set_target_properties( rocks_tools PROPERTIES - IMPORTED_LOCATION ${ROCKSDB_LIB}/librocksdb_tools.a) - ADD_LIBRARY(bz2 STATIC IMPORTED) - set_target_properties( - bz2 - PROPERTIES - IMPORTED_LOCATION ${BZIP2_LIB}/libbz2.a) - ADD_LIBRARY(zlib STATIC IMPORTED) - set_target_properties( - zlib - PROPERTIES - IMPORTED_LOCATION ${ZLIB_LIB}/libz.a) - ADD_LIBRARY(lz4 STATIC IMPORTED) - set_target_properties( - lz4 - PROPERTIES - IMPORTED_LOCATION ${LZ4_LIB}/liblz4.a) - ADD_LIBRARY(snappy STATIC IMPORTED) - set_target_properties( - snappy - PROPERTIES - IMPORTED_LOCATION ${SNAPPY_LIB}/libsnappy.a) - ADD_LIBRARY(zstd STATIC IMPORTED) - set_target_properties( - zstd - PROPERTIES - IMPORTED_LOCATION ${ZSTD_LIB}/libzstd.a) - set(linked_libraries ${linked_libraries} bz2 zlib rocks_tools rocksdb lz4 snappy zstd) + IMPORTED_LOCATION ${ROCKSDB_TOOLS_LIB}/librocksdb_tools.a) + set(linked_libraries ${linked_libraries} rocks_tools rocksdb) endif() add_library(ozone_rocksdb_tools SHARED ${SOURCE_FILES}) target_link_libraries(ozone_rocksdb_tools ${linked_libraries}) diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java 
b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java index d3121144d37a..8937f0803a18 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeConstants.java @@ -26,6 +26,5 @@ public final class NativeConstants { private NativeConstants() { } - public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME - = "ozone_rocksdb_tools"; + public static final String ROCKS_TOOLS_NATIVE_LIBRARY_NAME = "ozone_rocksdb_tools"; } diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..02125951c1fe --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileIterator.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import com.google.common.primitives.UnsignedLong; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.util.ClosableIterator; + +import java.util.Arrays; +import java.util.NoSuchElementException; +import java.util.function.Function; + +/** + * Iterator for SSTFileReader which would read all entries including tombstones. + */ +public class ManagedRawSSTFileIterator implements ClosableIterator { + // Native address of pointer to the object. + private final long nativeHandle; + private final Function transformer; + + ManagedRawSSTFileIterator(long nativeHandle, Function transformer) { + this.nativeHandle = nativeHandle; + this.transformer = transformer; + } + + private native boolean hasNext(long handle); + private native void next(long handle); + private native byte[] getKey(long handle); + private native byte[] getValue(long handle); + private native long getSequenceNumber(long handle); + private native int getType(long handle); + + @Override + public boolean hasNext() { + return this.hasNext(nativeHandle); + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + KeyValue keyValue = new KeyValue(this.getKey(nativeHandle), + UnsignedLong.fromLongBits(this.getSequenceNumber(this.nativeHandle)), + this.getType(nativeHandle), + this.getValue(nativeHandle)); + this.next(nativeHandle); + return this.transformer.apply(keyValue); + } + + private native void closeInternal(long handle); + + @Override + public void close() { + this.closeInternal(this.nativeHandle); + } + + /** + * Class containing Parsed KeyValue Record from RawSstReader output. + */ + public static final class KeyValue { + + private final byte[] key; + private final UnsignedLong sequence; + private final Integer type; + private final byte[] value; + + private KeyValue(byte[] key, UnsignedLong sequence, Integer type, + byte[] value) { + this.key = key; + this.sequence = sequence; + this.type = type; + this.value = value; + } + + public byte[] getKey() { + return Arrays.copyOf(key, key.length); + } + + public UnsignedLong getSequence() { + return sequence; + } + + public Integer getType() { + return type; + } + + public byte[] getValue() { + return Arrays.copyOf(value, value.length); + } + + @Override + public String toString() { + return "KeyValue{" + + "key=" + StringUtils.bytes2String(key) + + ", sequence=" + sequence + + ", type=" + type + + ", value=" + StringUtils.bytes2String(value) + + '}'; + } + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java new file mode 100644 index 000000000000..7c8783b43948 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRawSSTFileReader.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.hadoop.hdds.utils.NativeLibraryLoader; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.util.function.Function; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + +/** + * JNI for RocksDB RawSSTFileReader. + */ +public class ManagedRawSSTFileReader implements Closeable { + + public static boolean loadLibrary() throws NativeLibraryNotLoadedException { + ManagedRocksObjectUtils.loadRocksDBLibrary(); + if (!NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { + throw new NativeLibraryNotLoadedException(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + } + return true; + } + + private final String fileName; + // Native address of pointer to the object. + private final long nativeHandle; + private static final Logger LOG = LoggerFactory.getLogger(ManagedRawSSTFileReader.class); + + public ManagedRawSSTFileReader(final ManagedOptions options, final String fileName, final int readAheadSize) { + this.fileName = fileName; + this.nativeHandle = this.newRawSSTFileReader(options.getNativeHandle(), fileName, readAheadSize); + } + + public ManagedRawSSTFileIterator newIterator( + Function transformerFunction, + ManagedSlice fromSlice, ManagedSlice toSlice) { + long fromNativeHandle = fromSlice == null ? 0 : fromSlice.getNativeHandle(); + long toNativeHandle = toSlice == null ? 0 : toSlice.getNativeHandle(); + LOG.info("Iterating SST file: {} with native lib. " + + "LowerBound: {}, UpperBound: {}", fileName, fromSlice, toSlice); + return new ManagedRawSSTFileIterator<>( + newIterator(this.nativeHandle, fromSlice != null, + fromNativeHandle, toSlice != null, toNativeHandle), + transformerFunction); + } + + private native long newRawSSTFileReader(long optionsHandle, String filePath, int readSize); + + + private native long newIterator(long handle, boolean hasFrom, long fromSliceHandle, boolean hasTo, + long toSliceHandle); + + private native void disposeInternal(long handle); + + @Override + public void close() { + disposeInternal(nativeHandle); + } +} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java deleted file mode 100644 index d8844eaacbcd..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpIterator.java +++ /dev/null @@ -1,314 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
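For readers following the new API, a hedged usage sketch of the reader and iterator added above follows. The SST file path, the read-ahead size, and the generic signatures are assumptions (the type parameters are not visible in this excerpt), and ManagedOptions comes from the existing managed-rocksdb module rather than this patch; passing null bounds iterates the whole file, while ManagedSlice instances could be supplied instead to restrict the key range.

import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader;

public class RawSSTFileReaderDemo {
  public static void main(String[] args) throws Exception {
    ManagedRawSSTFileReader.loadLibrary();  // fails fast if the native library cannot be loaded
    try (ManagedOptions options = new ManagedOptions();
         ManagedRawSSTFileReader<String> reader =
             new ManagedRawSSTFileReader<>(options, "/tmp/000042.sst", 2 * 1024 * 1024);
         ManagedRawSSTFileIterator<String> it = reader.newIterator(
             kv -> StringUtils.bytes2String(kv.getKey()) + " type=" + kv.getType(),
             null, null)) {
      while (it.hasNext()) {
        System.out.println(it.next());  // prints every entry, tombstones included
      }
    }
  }
}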

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.collect.Maps; -import com.google.common.primitives.UnsignedLong; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.util.ClosableIterator; -import org.eclipse.jetty.io.RuntimeIOException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -/** - * Iterator to Parse output of RocksDBSSTDumpTool. - */ -public abstract class ManagedSSTDumpIterator implements ClosableIterator { - - private static final Logger LOG = - LoggerFactory.getLogger(ManagedSSTDumpIterator.class); - // Since we don't have any restriction on the key & value, we are prepending - // the length of the pattern in the sst dump tool output. - // The first token in the pattern is the key. - // The second tells the sequence number of the key. - // The third token gives the type of key in the sst file. - // The fourth token - private InputStream processOutput; - private Optional currentKey; - private byte[] intBuffer; - private Optional nextKey; - - private ManagedSSTDumpTool.SSTDumpToolTask sstDumpToolTask; - private AtomicBoolean open; - private StackTraceElement[] stackTrace; - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options) - throws IOException { - this(sstDumpTool, sstFilePath, options, null, null); - } - - public ManagedSSTDumpIterator(ManagedSSTDumpTool sstDumpTool, - String sstFilePath, ManagedOptions options, - ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) - throws IOException { - File sstFile = new File(sstFilePath); - if (!sstFile.exists()) { - throw new IOException(String.format("File in path : %s doesn't exist", - sstFile.getAbsolutePath())); - } - if (!sstFile.isFile()) { - throw new IOException(String.format("Path given: %s is not a file", - sstFile.getAbsolutePath())); - } - init(sstDumpTool, sstFile, options, lowerKeyBound, upperKeyBound); - this.stackTrace = Thread.currentThread().getStackTrace(); - } - - /** - * Parses next occuring number in the stream. 
- * - * @return Optional of the integer empty if no integer exists - */ - private Optional getNextNumberInStream() throws IOException { - int n = processOutput.read(intBuffer, 0, 4); - if (n == 4) { - return Optional.of(ByteBuffer.wrap(intBuffer).getInt()); - } else if (n >= 0) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.empty(); - } - - private Optional getNextByteArray() throws IOException { - Optional size = getNextNumberInStream(); - if (size.isPresent()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Allocating byte array, size: {}", size.get()); - } - byte[] b = new byte[size.get()]; - int n = processOutput.read(b); - if (n >= 0 && n != size.get()) { - throw new IllegalStateException(String.format("Integer expects " + - "4 bytes to be read from the stream, but read only %d bytes", n)); - } - return Optional.of(b); - } - return Optional.empty(); - } - - private Optional getNextUnsignedLong() throws IOException { - long val = 0; - for (int i = 0; i < 8; i++) { - val = val << 8; - int nextByte = processOutput.read(); - if (nextByte < 0) { - if (i == 0) { - return Optional.empty(); - } - throw new IllegalStateException(String.format("Long expects " + - "8 bytes to be read from the stream, but read only %d bytes", i)); - } - val += nextByte; - } - return Optional.of(UnsignedLong.fromLongBits(val)); - } - - private void init(ManagedSSTDumpTool sstDumpTool, File sstFile, - ManagedOptions options, ManagedSlice lowerKeyBound, - ManagedSlice upperKeyBound) { - Map argMap = Maps.newHashMap(); - argMap.put("file", sstFile.getAbsolutePath()); - argMap.put("silent", null); - argMap.put("command", "scan"); - // strings containing '\0' do not have the same value when encode UTF-8 on - // java which is 0. But in jni the utf-8 encoded value for '\0' - // becomes -64 -128. Thus the value becomes different. - // In order to support this, changes have been made on the rocks-tools - // to pass the address of the ManagedSlice and the jni can use the object - // of slice directly from there. - if (Objects.nonNull(lowerKeyBound)) { - argMap.put("from", String.valueOf(lowerKeyBound.getNativeHandle())); - } - if (Objects.nonNull(upperKeyBound)) { - argMap.put("to", String.valueOf(upperKeyBound.getNativeHandle())); - } - this.sstDumpToolTask = sstDumpTool.run(argMap, options); - processOutput = sstDumpToolTask.getPipedOutput(); - intBuffer = new byte[4]; - open = new AtomicBoolean(true); - currentKey = Optional.empty(); - nextKey = Optional.empty(); - next(); - } - - /** - * Throws Runtime exception in the case iterator is closed or - * the native Dumptool exited with non zero exit value. - */ - private void checkSanityOfProcess() { - if (!this.open.get()) { - throw new RuntimeException("Iterator has been closed"); - } - if (sstDumpToolTask.getFuture().isDone() && - sstDumpToolTask.exitValue() != 0) { - throw new RuntimeException("Process Terminated with non zero " + - String.format("exit value %d", sstDumpToolTask.exitValue())); - } - } - - /** - * Checks the status of the process & sees if there is another record. - * - * @return True if next exists & false otherwise - * Throws Runtime Exception in case of SST File read failure - */ - - @Override - public boolean hasNext() { - checkSanityOfProcess(); - return nextKey.isPresent(); - } - - /** - * Transforms Key to a certain value. 
- * - * @param value - * @return transformed Value - */ - protected abstract T getTransformedValue(Optional value); - - /** - * Returns the next record from SSTDumpTool. - * - * @return next Key - * Throws Runtime Exception incase of failure. - */ - @Override - public T next() { - checkSanityOfProcess(); - currentKey = nextKey; - nextKey = Optional.empty(); - try { - Optional key = getNextByteArray(); - if (!key.isPresent()) { - return getTransformedValue(currentKey); - } - UnsignedLong sequenceNumber = getNextUnsignedLong() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number" + - " for key %s", StringUtils.bytes2String(key.get())))); - - Integer type = getNextNumberInStream() - .orElseThrow(() -> new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString()))); - byte[] val = getNextByteArray().orElseThrow(() -> - new IllegalStateException( - String.format("Error while trying to read sequence number for " + - "key %s with sequence number %s of type %d", - StringUtils.bytes2String(key.get()), - sequenceNumber.toString(), type))); - nextKey = Optional.of(new KeyValue(key.get(), sequenceNumber, type, val)); - } catch (IOException e) { - // TODO [SNAPSHOT] Throw custom snapshot exception - throw new RuntimeIOException(e); - } - return getTransformedValue(currentKey); - } - - @Override - public synchronized void close() throws UncheckedIOException { - if (this.sstDumpToolTask != null) { - if (!this.sstDumpToolTask.getFuture().isDone()) { - this.sstDumpToolTask.getFuture().cancel(true); - } - try { - this.processOutput.close(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - open.compareAndSet(true, false); - } - - @Override - protected void finalize() throws Throwable { - if (open.get()) { - LOG.warn("{} is not closed properly." + - " StackTrace for unclosed instance: {}", - this.getClass().getName(), - Arrays.stream(stackTrace) - .map(StackTraceElement::toString).collect( - Collectors.joining("\n"))); - } - this.close(); - super.finalize(); - } - - /** - * Class containing Parsed KeyValue Record from Sst Dumptool output. 
- */ - public static final class KeyValue { - - private final byte[] key; - private final UnsignedLong sequence; - private final Integer type; - private final byte[] value; - - private KeyValue(byte[] key, UnsignedLong sequence, Integer type, - byte[] value) { - this.key = key; - this.sequence = sequence; - this.type = type; - this.value = value; - } - - public byte[] getKey() { - return key; - } - - public UnsignedLong getSequence() { - return sequence; - } - - public Integer getType() { - return type; - } - - public byte[] getValue() { - return value; - } - - @Override - public String toString() { - return "KeyValue{" + - "key=" + StringUtils.bytes2String(key) + - ", sequence=" + sequence + - ", type=" + type + - ", value=" + StringUtils.bytes2String(value) + - '}'; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java deleted file mode 100644 index 5d965d7398e0..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedSSTDumpTool.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; - -import java.io.InputStream; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; - -/** - * JNI for RocksDB SSTDumpTool. 
Pipes the output to an output stream - */ -public class ManagedSSTDumpTool { - - private int bufferCapacity; - private ExecutorService executorService; - - public ManagedSSTDumpTool(ExecutorService executorService, - int bufferCapacity) - throws NativeLibraryNotLoadedException { - if (!NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { - throw new NativeLibraryNotLoadedException( - ROCKS_TOOLS_NATIVE_LIBRARY_NAME); - } - this.bufferCapacity = bufferCapacity; - this.executorService = executorService; - } - - public SSTDumpToolTask run(String[] args, ManagedOptions options) { - PipeInputStream pipeInputStream = new PipeInputStream(bufferCapacity); - return new SSTDumpToolTask(this.executorService.submit(() -> - this.runInternal(args, options.getNativeHandle(), - pipeInputStream.getNativeHandle())), pipeInputStream); - } - - public SSTDumpToolTask run(Map args, ManagedOptions options) { - return this.run(args.entrySet().stream().map(e -> "--" - + (e.getValue() == null || e.getValue().isEmpty() ? e.getKey() : - e.getKey() + "=" + e.getValue())).toArray(String[]::new), options); - } - - private native int runInternal(String[] args, long optionsHandle, - long pipeHandle); - - /** - * Class holding piped output of SST Dumptool & future of command. - */ - static class SSTDumpToolTask { - private Future future; - private InputStream pipedOutput; - - SSTDumpToolTask(Future future, InputStream pipedOutput) { - this.future = future; - this.pipedOutput = pipedOutput; - } - - public Future getFuture() { - return future; - } - - public InputStream getPipedOutput() { - return pipedOutput; - } - - public int exitValue() { - if (this.future.isDone()) { - try { - return future.get(); - } catch (InterruptedException | ExecutionException e) { - return 1; - } - } - return 0; - } - } -} diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java deleted file mode 100644 index df4f613f98e2..000000000000 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/db/managed/PipeInputStream.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import java.io.InputStream; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * JNI for reading data from pipe. 
- */ -public class PipeInputStream extends InputStream { - - private byte[] byteBuffer; - private long nativeHandle; - private int numberOfBytesLeftToRead; - private int index = 0; - private int capacity; - - private AtomicBoolean cleanup; - - PipeInputStream(int capacity) { - this.byteBuffer = new byte[capacity]; - this.numberOfBytesLeftToRead = 0; - this.capacity = capacity; - this.nativeHandle = newPipe(); - this.cleanup = new AtomicBoolean(false); - } - - long getNativeHandle() { - return nativeHandle; - } - - @Override - public int read() { - if (numberOfBytesLeftToRead < 0) { - this.close(); - return -1; - } - while (numberOfBytesLeftToRead == 0) { - numberOfBytesLeftToRead = readInternal(byteBuffer, capacity, - nativeHandle); - index = 0; - if (numberOfBytesLeftToRead != 0) { - return read(); - } - } - numberOfBytesLeftToRead--; - int ret = byteBuffer[index] & 0xFF; - index += 1; - return ret; - } - - private native long newPipe(); - - private native int readInternal(byte[] buff, int numberOfBytes, - long pipeHandle); - - private native void closeInternal(long pipeHandle); - - @Override - public void close() { - if (this.cleanup.compareAndSet(false, true)) { - closeInternal(this.nativeHandle); - } - } - - @Override - protected void finalize() throws Throwable { - close(); - super.finalize(); - } -} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp new file mode 100644 index 000000000000..1cf222528379 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileIterator.cpp @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jboolean Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_hasNext(JNIEnv *env, jobject obj, + jlong native_handle) { + return static_cast(reinterpret_cast(native_handle)->Valid()); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_next(JNIEnv *env, jobject obj, + jlong native_handle) { + reinterpret_cast(native_handle)->Next(); +} + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getKey(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->key(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + + +jbyteArray Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getValue(JNIEnv *env, + jobject obj, + jlong native_handle) { + ROCKSDB_NAMESPACE::Slice slice = reinterpret_cast(native_handle)->value(); + jbyteArray jkey = env->NewByteArray(static_cast(slice.size())); + if (jkey == nullptr) { + // exception thrown: OutOfMemoryError + return nullptr; + } + env->SetByteArrayRegion( + jkey, 0, static_cast(slice.size()), + const_cast(reinterpret_cast(slice.data()))); + return jkey; +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getSequenceNumber(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint64_t sequence_number = + reinterpret_cast(native_handle)->sequenceNumber(); + jlong result; + std::memcpy(&result, &sequence_number, sizeof(jlong)); + return result; +} + + +jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_getType(JNIEnv *env, + jobject obj, + jlong native_handle) { + uint32_t type = reinterpret_cast(native_handle)->type(); + return static_cast(type); +} + + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileIterator_closeInternal(JNIEnv *env, + jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp new file mode 100644 index 000000000000..f3b8dc02639d --- /dev/null +++ b/hadoop-hdds/rocks-native/src/main/native/ManagedRawSSTFileReader.cpp @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader.h" +#include "rocksdb/options.h" +#include "rocksdb/raw_sst_file_reader.h" +#include "rocksdb/raw_iterator.h" +#include +#include "cplusplus_to_java_convert.h" +#include + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newRawSSTFileReader(JNIEnv *env, jobject obj, + jlong options_handle, + jstring jfilename, + jint readahead_size) { + ROCKSDB_NAMESPACE::Options *options = reinterpret_cast(options_handle); + const char *file_path = env->GetStringUTFChars(jfilename, nullptr); + size_t read_ahead_size_value = static_cast(readahead_size); + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + new ROCKSDB_NAMESPACE::RawSstFileReader(*options, file_path, read_ahead_size_value, true, true); + env->ReleaseStringUTFChars(jfilename, file_path); + return GET_CPLUSPLUS_POINTER(raw_sst_file_reader); +} + +jlong Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_newIterator(JNIEnv *env, jobject obj, + jlong native_handle, + jboolean jhas_from, + jlong from_slice_handle, + jboolean jhas_to, + jlong to_slice_handle) { + ROCKSDB_NAMESPACE::Slice* from_slice = nullptr; + ROCKSDB_NAMESPACE::Slice* to_slice = nullptr; + ROCKSDB_NAMESPACE::RawSstFileReader* raw_sst_file_reader = + reinterpret_cast(native_handle); + bool has_from = static_cast(jhas_from); + bool has_to = static_cast(jhas_to); + if (has_from) { + from_slice = reinterpret_cast(from_slice_handle); + } + if (has_to) { + to_slice = reinterpret_cast(to_slice_handle); + } + ROCKSDB_NAMESPACE::RawIterator* iterator = raw_sst_file_reader->newIterator(has_from, from_slice, has_to, to_slice); + return GET_CPLUSPLUS_POINTER(iterator); +} + +void Java_org_apache_hadoop_hdds_utils_db_managed_ManagedRawSSTFileReader_disposeInternal(JNIEnv *env, jobject obj, + jlong native_handle) { + delete reinterpret_cast(native_handle); +} diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp b/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp deleted file mode 100644 index f1dd54438700..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.cpp +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "Pipe.h" -#include - -const int Pipe::READ_FILE_DESCRIPTOR_IDX = 0; -const int Pipe::WRITE_FILE_DESCRIPTOR_IDX = 1; - -Pipe::Pipe() { - pipe(p); - open = true; -} - -Pipe::~Pipe() { - ::close(p[Pipe::READ_FILE_DESCRIPTOR_IDX]); - ::close(p[Pipe::WRITE_FILE_DESCRIPTOR_IDX]); -} - -void Pipe::close() { - open = false; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/Pipe.h b/hadoop-hdds/rocks-native/src/main/native/Pipe.h deleted file mode 100644 index aa75c6311cbc..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/Pipe.h +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ROCKS_NATIVE_PIPE_H -#define ROCKS_NATIVE_PIPE_H - -#include - -class Pipe { - public: - static const int READ_FILE_DESCRIPTOR_IDX; - static const int WRITE_FILE_DESCRIPTOR_IDX; - Pipe(); - ~Pipe(); - void close(); - int getReadFd() { - return getPipeFileDescriptorIndex(READ_FILE_DESCRIPTOR_IDX); - } - - int getWriteFd() { - return getPipeFileDescriptorIndex(WRITE_FILE_DESCRIPTOR_IDX); - } - - int getPipeFileDescriptorIndex(int idx) { - return p[idx]; - } - - bool isOpen() { - return open; - } - - - private: - int p[2]; - FILE* wr; - bool open; - -}; - -#endif //ROCKS_NATIVE_PIPE_H diff --git a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp b/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp deleted file mode 100644 index 53f60cdd65af..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/PipeInputStream.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include "Pipe.h" -#include "cplusplus_to_java_convert.h" -#include "org_apache_hadoop_hdds_utils_db_managed_PipeInputStream.h" - - -jlong Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_newPipe(JNIEnv *, jobject) { - Pipe *pipe = new Pipe(); - return GET_CPLUSPLUS_POINTER(pipe); -} - -jint Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_readInternal(JNIEnv *env, jobject object, jbyteArray jbyteArray, jint capacity, jlong nativeHandle) { - int cap_int = capacity; - Pipe *pipe = reinterpret_cast(nativeHandle); - jbyte *b = (env)->GetByteArrayElements(jbyteArray, JNI_FALSE); - cap_int = read(pipe->getReadFd(), b, cap_int); - if (cap_int == 0) { - if (!pipe->isOpen()) { - cap_int = -1; - } - } - env->ReleaseByteArrayElements(jbyteArray, b, 0); - return cap_int; -} - -void Java_org_apache_hadoop_hdds_utils_db_managed_PipeInputStream_closeInternal(JNIEnv *env, jobject object, jlong nativeHandle) { - delete reinterpret_cast(nativeHandle); -} - diff --git a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp b/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp deleted file mode 100644 index 285c5906c2d8..000000000000 --- a/hadoop-hdds/rocks-native/src/main/native/SSTDumpTool.cpp +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool.h" -#include "rocksdb/options.h" -#include "rocksdb/sst_dump_tool.h" -#include -#include "cplusplus_to_java_convert.h" -#include "Pipe.h" -#include - -jint Java_org_apache_hadoop_hdds_utils_db_managed_ManagedSSTDumpTool_runInternal(JNIEnv *env, jobject obj, - jobjectArray argsArray, jlong optionsHandle, jlong pipeHandle) { - ROCKSDB_NAMESPACE::SSTDumpTool dumpTool; - ROCKSDB_NAMESPACE::Options options; - Pipe *pipe = reinterpret_cast(pipeHandle); - int length = env->GetArrayLength(argsArray); - char *args[length + 1]; - for (int i = 0; i < length; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)i); - char *utf_str = (char *)env->GetStringUTFChars(str_val, JNI_FALSE); - args[i + 1] = utf_str; - } - FILE *wr = fdopen(pipe->getWriteFd(), "w"); - int ret = dumpTool.Run(length + 1, args, options, wr); - for (int i = 1; i < length + 1; i++) { - jstring str_val = (jstring)env->GetObjectArrayElement(argsArray, (jsize)(i - 1)); - env->ReleaseStringUTFChars(str_val, args[i]); - } - fclose(wr); - pipe->close(); - return ret; -} diff --git a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h index efe9d4a5be24..4862ea12a1b9 100644 --- a/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h +++ b/hadoop-hdds/rocks-native/src/main/native/cplusplus_to_java_convert.h @@ -16,7 +16,7 @@ * limitations under the License. */ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). diff --git a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch index 841c2533b863..12dc74614a45 100644 --- a/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch +++ b/hadoop-hdds/rocks-native/src/main/patches/rocks-native.patch @@ -16,592 +16,531 @@ * limitations under the License. 
*/ -diff --git a/include/rocksdb/sst_dump_tool.h b/include/rocksdb/sst_dump_tool.h -index 9261ba47d..1e62b88a3 100644 ---- a/include/rocksdb/sst_dump_tool.h -+++ b/include/rocksdb/sst_dump_tool.h -@@ -11,7 +11,8 @@ namespace ROCKSDB_NAMESPACE { - - class SSTDumpTool { - public: -- int Run(int argc, char const* const* argv, Options options = Options()); -+ int Run(int argc, char const* const* argv, Options options = Options(), -+ FILE* out = stdout, FILE* err = stderr); - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc -index eefbaaeee..734a2f0dd 100644 ---- a/table/sst_file_dumper.cc -+++ b/table/sst_file_dumper.cc -@@ -45,7 +45,7 @@ SstFileDumper::SstFileDumper(const Options& options, - Temperature file_temp, size_t readahead_size, - bool verify_checksum, bool output_hex, - bool decode_blob_index, const EnvOptions& soptions, -- bool silent) -+ bool silent, FILE* out, FILE* err) - : file_name_(file_path), - read_num_(0), - file_temp_(file_temp), -@@ -57,10 +57,13 @@ SstFileDumper::SstFileDumper(const Options& options, - ioptions_(options_), - moptions_(ColumnFamilyOptions(options_)), - read_options_(verify_checksum, false), -- internal_comparator_(BytewiseComparator()) { -+ internal_comparator_(BytewiseComparator()), -+ out_(out), -+ err_(err) -+ { - read_options_.readahead_size = readahead_size; - if (!silent_) { -- fprintf(stdout, "Process %s\n", file_path.c_str()); -+ fprintf(out_, "Process %s\n", file_path.c_str()); - } - init_result_ = GetTableReader(file_name_); - } -@@ -253,17 +256,17 @@ Status SstFileDumper::ShowAllCompressionSizes( - int32_t compress_level_from, int32_t compress_level_to, - uint32_t max_dict_bytes, uint32_t zstd_max_train_bytes, - uint64_t max_dict_buffer_bytes, bool use_zstd_dict_trainer) { -- fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); -+ fprintf(out_, "Block Size: %" ROCKSDB_PRIszt "\n", block_size); - for (auto& i : compression_types) { - if (CompressionTypeSupported(i.first)) { -- fprintf(stdout, "Compression: %-24s\n", i.second); -+ fprintf(out_, "Compression: %-24s\n", i.second); - CompressionOptions compress_opt; - compress_opt.max_dict_bytes = max_dict_bytes; - compress_opt.zstd_max_train_bytes = zstd_max_train_bytes; - compress_opt.max_dict_buffer_bytes = max_dict_buffer_bytes; - compress_opt.use_zstd_dict_trainer = use_zstd_dict_trainer; - for (int32_t j = compress_level_from; j <= compress_level_to; j++) { -- fprintf(stdout, "Compression level: %d", j); -+ fprintf(out_, "Compression level: %d", j); - compress_opt.level = j; - Status s = ShowCompressionSize(block_size, i.first, compress_opt); - if (!s.ok()) { -@@ -271,7 +274,7 @@ Status SstFileDumper::ShowAllCompressionSizes( - } - } - } else { -- fprintf(stdout, "Unsupported compression type: %s.\n", i.second); -+ fprintf(err_, "Unsupported compression type: %s.\n", i.second); - } - } - return Status::OK(); -@@ -307,9 +310,9 @@ Status SstFileDumper::ShowCompressionSize( - } - - std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); -- fprintf(stdout, " Size: %10" PRIu64, file_size); -- fprintf(stdout, " Blocks: %6" PRIu64, num_data_blocks); -- fprintf(stdout, " Time Taken: %10s microsecs", -+ fprintf(out_, " Size: %10" PRIu64, file_size); -+ fprintf(out_, " Blocks: %6" PRIu64, num_data_blocks); -+ fprintf(out_, " Time Taken: %10s microsecs", - std::to_string( - std::chrono::duration_cast(end - start) - .count()) -@@ -342,11 +345,11 @@ Status SstFileDumper::ShowCompressionSize( - : 
((static_cast(not_compressed_blocks) / - static_cast(num_data_blocks)) * - 100.0); -- fprintf(stdout, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, -+ fprintf(out_, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks, - compressed_pcnt); -- fprintf(stdout, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", -+ fprintf(out_, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)", - ratio_not_compressed_blocks, ratio_not_compressed_pcnt); -- fprintf(stdout, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", -+ fprintf(out_, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n", - not_compressed_blocks, not_compressed_pcnt); - return Status::OK(); - } -@@ -362,7 +365,7 @@ Status SstFileDumper::ReadTableProperties(uint64_t table_magic_number, - /* memory_allocator= */ nullptr, prefetch_buffer); - if (!s.ok()) { - if (!silent_) { -- fprintf(stdout, "Not able to read table properties\n"); -+ fprintf(err_, "Not able to read table properties\n"); - } - } - return s; -@@ -410,7 +413,7 @@ Status SstFileDumper::SetTableOptionsByMagicNumber( - - options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); - if (!silent_) { -- fprintf(stdout, "Sst file format: plain table\n"); -+ fprintf(out_, "Sst file format: plain table\n"); - } - } else { - char error_msg_buffer[80]; -@@ -427,15 +430,56 @@ Status SstFileDumper::SetOldTableOptions() { - assert(table_properties_ == nullptr); - options_.table_factory = std::make_shared(); - if (!silent_) { -- fprintf(stdout, "Sst file format: block-based(old version)\n"); -+ fprintf(out_, "Sst file format: block-based(old version)\n"); - } - - return Status::OK(); - } - -+void write(int value, FILE* file) { -+ char b[4]; -+ b[3] = value & 0x000000ff; -+ b[2] = (value & 0x0000ff00) >> 8; -+ b[1] = (value & 0x00ff0000) >> 16; -+ b[0] = (value & 0xff000000) >> 24; -+ std::fwrite(b, 4, 1, file); +diff --git a/include/rocksdb/raw_iterator.h b/include/rocksdb/raw_iterator.h +new file mode 100644 +index 000000000..21242ed15 +--- /dev/null ++++ b/include/rocksdb/raw_iterator.h +@@ -0,0 +1,25 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE ++ ++ ++#include "rocksdb/advanced_options.h" ++namespace ROCKSDB_NAMESPACE { ++ ++class RawIterator { ++ public: ++ virtual ~RawIterator() {} ++ virtual bool Valid() const = 0; ++ virtual Slice key() const = 0; ++ virtual Slice value() const = 0; ++ virtual uint64_t sequenceNumber() const = 0; ++ virtual uint32_t type() const = 0; ++ virtual void Next() = 0; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/include/rocksdb/raw_sst_file_reader.h b/include/rocksdb/raw_sst_file_reader.h +new file mode 100644 +index 000000000..09e748208 +--- /dev/null ++++ b/include/rocksdb/raw_sst_file_reader.h +@@ -0,0 +1,62 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
++#pragma once ++#ifndef ROCKSDB_LITE ++ ++#include ++#include ++ ++#include "rocksdb/raw_iterator.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/options.h" ++ ++ ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileReader { ++ public: ++ ++ RawSstFileReader(const Options& options, const std::string& file_name, ++ size_t readahead_size, bool verify_checksum, ++ bool silent = false); ++ ~RawSstFileReader(); ++ ++ RawIterator* newIterator(bool has_from, Slice* from, ++ bool has_to, Slice *to); ++ Status getStatus() { return init_result_; } ++ ++ private: ++ // Get the TableReader implementation for the sst file ++ Status GetTableReader(const std::string& file_path); ++ Status ReadTableProperties(uint64_t table_magic_number, ++ uint64_t file_size); ++ ++ Status SetTableOptionsByMagicNumber(uint64_t table_magic_number); ++ Status SetOldTableOptions(); ++ ++ // Helper function to call the factory with settings specific to the ++ // factory implementation ++ Status NewTableReader(uint64_t file_size); ++ ++ std::string file_name_; ++ Temperature file_temp_; ++ ++ // less verbose in stdout/stderr ++ bool silent_; ++ ++ // options_ and internal_comparator_ will also be used in ++ // ReadSequential internally (specifically, seek-related operations) ++ Options options_; ++ ++ Status init_result_; ++ ++ struct Rep; ++ std::unique_ptr rep_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/src.mk b/src.mk +index b94bc43ca..c13e5cde6 100644 +--- a/src.mk ++++ b/src.mk +@@ -338,11 +338,8 @@ RANGE_TREE_SOURCES =\ + utilities/transactions/lock/range/range_tree/range_tree_lock_tracker.cc + + TOOL_LIB_SOURCES = \ +- tools/io_tracer_parser_tool.cc \ +- tools/ldb_cmd.cc \ +- tools/ldb_tool.cc \ +- tools/sst_dump_tool.cc \ +- utilities/blob_db/blob_dump_tool.cc \ ++ tools/raw_sst_file_reader.cc \ ++ tools/raw_sst_file_iterator.cc \ + + ANALYZER_LIB_SOURCES = \ + tools/block_cache_analyzer/block_cache_trace_analyzer.cc \ +diff --git a/tools/raw_sst_file_iterator.cc b/tools/raw_sst_file_iterator.cc +new file mode 100644 +index 000000000..3051637a3 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.cc +@@ -0,0 +1,76 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). 
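raw_sst_file_reader.h above is the public entry point of the reworked patch: open a single SST file, then pull entries through the RawIterator interface instead of scraping sst_dump's text output through a pipe. The sketch below is an illustrative guess at the intended usage based only on the declarations in this hunk; the 2 MB readahead mirrors the value used by the new Java test later in this commit, and the from/to pair behaves as a user-key range that includes from and stops before to, judging from the iterator implementation that follows.

    // Minimal consumer sketch for the RawSstFileReader / RawIterator API
    // added by this patch; assumes only the declarations shown above.
    #include <cstdio>
    #include <memory>
    #include <string>

    #include "rocksdb/options.h"
    #include "rocksdb/raw_sst_file_reader.h"
    #include "rocksdb/slice.h"

    int scanSst(const std::string& path) {
      ROCKSDB_NAMESPACE::Options options;
      ROCKSDB_NAMESPACE::RawSstFileReader reader(
          options, path, 2 * 1024 * 1024 /* readahead_size */,
          true /* verify_checksum */, true /* silent */);
      if (!reader.getStatus().ok()) {
        std::fprintf(stderr, "%s\n", reader.getStatus().ToString().c_str());
        return 1;
      }
      // [from, to): lower bound inclusive, upper bound exclusive on user keys.
      ROCKSDB_NAMESPACE::Slice from("key1");
      ROCKSDB_NAMESPACE::Slice to("key9");
      std::unique_ptr<ROCKSDB_NAMESPACE::RawIterator> it(
          reader.newIterator(true /* has_from */, &from, true /* has_to */, &to));
      for (; it->Valid(); it->Next()) {
        std::printf("%s seq=%llu type=%u\n",
                    it->key().ToString(true /* hex */).c_str(),
                    static_cast<unsigned long long>(it->sequenceNumber()),
                    it->type());
      }
      return 0;
    }
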
++// ++#ifndef ROCKSDB_LITE ++ ++ ++#include ++#include ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "rocksdb/status.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "tools/raw_sst_file_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++RawSstFileIterator::RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, Slice* from_key, ++ bool has_to, Slice* to_key) ++ : iter_(iterator), ++ ikey(new ParsedInternalKey()), ++ has_to_(has_to), ++ to_key_(to_key) { ++ if (has_from) { ++ InternalKey k; ++ k.SetMinPossibleForUserKey(*from_key); ++ iter_->Seek(k.Encode()); ++ } else { ++ iter_->SeekToFirst(); ++ } ++ initKey(); ++} ++ ++bool RawSstFileIterator::Valid() const { ++ return iter_->Valid() && (!has_to_ || ++ BytewiseComparator()->Compare( ++ key(), *to_key_) < 0); ++} ++ ++void RawSstFileIterator::initKey() { ++ if (iter_->Valid()) { ++ ParseInternalKey(iter_->key(), ikey, true /* log_err_key */); ++ } +} ++void RawSstFileIterator::Next() { ++ iter_->Next(); ++ initKey(); ++ ++} ++ ++Slice RawSstFileIterator::key() const { ++ return ikey->user_key; ++} ++ ++uint64_t RawSstFileIterator::sequenceNumber() const { ++ return ikey->sequence; ++} ++ ++uint32_t RawSstFileIterator::type() const { ++ return static_cast(ikey->type); ++} ++ ++Slice RawSstFileIterator::value() const { ++ return iter_->value(); ++} ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_iterator.h b/tools/raw_sst_file_iterator.h +new file mode 100644 +index 000000000..58e34b260 +--- /dev/null ++++ b/tools/raw_sst_file_iterator.h +@@ -0,0 +1,45 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. ++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++#pragma once ++#ifndef ROCKSDB_LITE + -+void write(const char* value, int length, FILE* file) { -+ write(length, file); -+ fwrite(value, length, 1, file); ++#include ++#include ++#include "file/writable_file_writer.h" ++#include "rocksdb/advanced_options.h" ++#include "rocksdb/raw_iterator.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++class RawSstFileIterator : public RawIterator { ++ public: ++ explicit RawSstFileIterator(InternalIterator* iterator, ++ bool has_from, ++ Slice* from_key, ++ bool has_to, ++ Slice* to_key); ++ ++ bool Valid() const override; ++ Slice key() const override; ++ Slice value() const override; ++ uint64_t sequenceNumber() const override; ++ uint32_t type() const override; ++ void Next() final override; ++ ++ ~RawSstFileIterator(){ ++ delete iter_; ++ } ++ ++ private: ++ void initKey(); ++ InternalIterator* iter_; ++ ParsedInternalKey* ikey; ++ bool has_to_; ++ Slice* to_key_; ++}; ++ ++} // namespace ROCKSDB_NAMESPACE ++ ++#endif // ROCKSDB_LITE +diff --git a/tools/raw_sst_file_reader.cc b/tools/raw_sst_file_reader.cc +new file mode 100644 +index 000000000..5ba8a82ee +--- /dev/null ++++ b/tools/raw_sst_file_reader.cc +@@ -0,0 +1,272 @@ ++// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
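raw_sst_file_iterator.cc and its header above parse each internal key, so key() already returns the bare user key while sequenceNumber() and type() expose the internal-key metadata. The type code is the raw RocksDB value type (0x0 is a deletion and 0x1 a plain put in current RocksDB releases, which matches the 0/1 pairing the replacement Java test later in this commit relies on), so a consumer that distinguishes tombstones from puts might look like this sketch:

    // Sketch: interpreting RawIterator::type(). 0x0 and 0x1 are the RocksDB
    // internal value types for deletions and puts; other put-like types
    // (merge, blob index) also exist.
    #include <cstdio>
    #include <string>

    #include "rocksdb/raw_iterator.h"
    #include "rocksdb/slice.h"

    void printEntry(ROCKSDB_NAMESPACE::RawIterator* it) {
      const std::string key = it->key().ToString();
      if (it->type() == 0) {  // tombstone: value() is empty
        std::printf("DEL %s @ seq %llu\n", key.c_str(),
                    static_cast<unsigned long long>(it->sequenceNumber()));
      } else {
        std::printf("PUT %s => %s\n", key.c_str(),
                    it->value().ToString().c_str());
      }
    }
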
++// This source code is licensed under both the GPLv2 (found in the ++// COPYING file in the root directory) and Apache 2.0 License ++// (found in the LICENSE.Apache file in the root directory). ++// ++#ifndef ROCKSDB_LITE ++ ++#include "rocksdb/raw_sst_file_reader.h" ++ ++#include ++#include ++#include ++#include ++ ++ ++#include "db/memtable.h" ++#include "db/write_batch_internal.h" ++#include "options/cf_options.h" ++#include "rocksdb/env.h" ++#include "rocksdb/slice_transform.h" ++#include "rocksdb/status.h" ++#include "rocksdb/table_properties.h" ++#include "rocksdb/utilities/ldb_cmd.h" ++#include "table/block_based/block.h" ++#include "table/block_based/block_based_table_builder.h" ++#include "table/block_based/block_based_table_factory.h" ++#include "table/format.h" ++#include "table/meta_blocks.h" ++#include "table/plain/plain_table_factory.h" ++#include "table/table_reader.h" ++#include "tools/raw_sst_file_iterator.h" ++#include "db/dbformat.h" ++ ++namespace ROCKSDB_NAMESPACE { ++ ++struct RawSstFileReader::Rep { ++ Options options; ++ EnvOptions soptions_; ++ ReadOptions read_options_; ++ ImmutableOptions ioptions_; ++ MutableCFOptions moptions_; ++ InternalKeyComparator internal_comparator_; ++ std::unique_ptr table_properties_; ++ std::unique_ptr table_reader_; ++ std::unique_ptr file_; ++ ++ Rep(const Options& opts, bool verify_checksum, size_t readahead_size) ++ : options(opts), ++ soptions_(EnvOptions()), ++ read_options_(verify_checksum, false), ++ ioptions_(options), ++ moptions_(ColumnFamilyOptions(options)), ++ internal_comparator_(InternalKeyComparator(BytewiseComparator())) { ++ read_options_.readahead_size = readahead_size; ++ } ++}; ++ ++RawSstFileReader::RawSstFileReader(const Options& options, ++ const std::string& file_name, ++ size_t readahead_size, ++ bool verify_checksum, ++ bool silent) :rep_(new Rep(options, ++ verify_checksum, ++ readahead_size)) { ++ file_name_ = file_name; ++ silent_ = silent; ++ options_ = options; ++ file_temp_ = Temperature::kUnknown; ++ init_result_ = GetTableReader(file_name_); +} + -+void write(const std::string& value, FILE* file) { -+ write(value.data(), (int)value.length(), file); ++RawSstFileReader::~RawSstFileReader() {} ++ ++ ++ ++extern const uint64_t kBlockBasedTableMagicNumber; ++extern const uint64_t kLegacyBlockBasedTableMagicNumber; ++extern const uint64_t kPlainTableMagicNumber; ++extern const uint64_t kLegacyPlainTableMagicNumber; ++ ++Status RawSstFileReader::GetTableReader(const std::string& file_path) { ++ // Warning about 'magic_number' being uninitialized shows up only in UBsan ++ // builds. Though access is guarded by 's.ok()' checks, fix the issue to ++ // avoid any warnings. 
++ uint64_t magic_number = Footer::kNullTableMagicNumber; ++ ++ // read table magic number ++ Footer footer; ++ ++ const auto& fs = options_.env->GetFileSystem(); ++ std::unique_ptr file; ++ uint64_t file_size = 0; ++ FileOptions fopts = rep_->soptions_; ++ fopts.temperature = file_temp_; ++ Status s = fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ if (s.ok()) { ++ s = fs->GetFileSize(file_path, IOOptions(), &file_size, nullptr); ++ } ++ ++ // check empty file ++ // if true, skip further processing of this file ++ if (file_size == 0) { ++ return Status::Aborted(file_path, "Empty file"); ++ } ++ ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ ++ FilePrefetchBuffer prefetch_buffer( ++ 0 /* readahead_size */, 0 /* max_readahead_size */, true /* enable */, ++ false /* track_min_offset */); ++ if (s.ok()) { ++ const uint64_t kSstDumpTailPrefetchSize = 512 * 1024; ++ uint64_t prefetch_size = (file_size > kSstDumpTailPrefetchSize) ++ ? kSstDumpTailPrefetchSize ++ : file_size; ++ uint64_t prefetch_off = file_size - prefetch_size; ++ IOOptions opts; ++ s = prefetch_buffer.Prefetch(opts, rep_->file_.get(), prefetch_off, ++ static_cast(prefetch_size), ++ Env::IO_TOTAL /* rate_limiter_priority */); ++ ++ s = ReadFooterFromFile(opts, rep_->file_.get(), &prefetch_buffer, file_size, ++ &footer); ++ } ++ if (s.ok()) { ++ magic_number = footer.table_magic_number(); ++ } ++ ++ if (s.ok()) { ++ if (magic_number == kPlainTableMagicNumber || ++ magic_number == kLegacyPlainTableMagicNumber) { ++ rep_->soptions_.use_mmap_reads = true; ++ ++ fs->NewRandomAccessFile(file_path, fopts, &file, nullptr); ++ rep_->file_.reset(new RandomAccessFileReader(std::move(file), file_path)); ++ } ++ ++ s = ROCKSDB_NAMESPACE::ReadTableProperties( ++ rep_->file_.get(), file_size, magic_number, rep_->ioptions_, &(rep_->table_properties_), ++ /* memory_allocator= */ nullptr, (magic_number == kBlockBasedTableMagicNumber) ++ ? &prefetch_buffer ++ : nullptr); ++ // For old sst format, ReadTableProperties might fail but file can be read ++ if (s.ok()) { ++ s = SetTableOptionsByMagicNumber(magic_number); ++ if (s.ok()) { ++ if (rep_->table_properties_ && !rep_->table_properties_->comparator_name.empty()) { ++ ConfigOptions config_options; ++ const Comparator* user_comparator = nullptr; ++ s = Comparator::CreateFromString(config_options, ++ rep_->table_properties_->comparator_name, ++ &user_comparator); ++ if (s.ok()) { ++ assert(user_comparator); ++ rep_->internal_comparator_ = InternalKeyComparator(user_comparator); ++ } ++ } ++ } ++ } else { ++ if (!silent_) { ++ fprintf(stderr, "Not able to read table properties\n"); ++ } ++ s = SetOldTableOptions(); ++ } ++ options_.comparator = rep_->internal_comparator_.user_comparator(); ++ } ++ ++ if (s.ok()) { ++ s = NewTableReader(file_size); ++ } ++ return s; +} + -+void write(Slice &slice, FILE* file) { -+ int size = (int)slice.size(); -+ write(slice.data(), size, file); ++Status RawSstFileReader::NewTableReader(uint64_t file_size) { ++ auto t_opt = ++ TableReaderOptions(rep_->ioptions_, rep_->moptions_.prefix_extractor, rep_->soptions_, ++ rep_->internal_comparator_, false /* skip_filters */, ++ false /* imortal */, true /* force_direct_prefetch */); ++ // Allow open file with global sequence number for backward compatibility. 
++ t_opt.largest_seqno = kMaxSequenceNumber; ++ ++ // We need to turn off pre-fetching of index and filter nodes for ++ // BlockBasedTable ++ if (options_.table_factory->IsInstanceOf( ++ TableFactory::kBlockBasedTableName())) { ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_), ++ /*enable_prefetch=*/false); ++ } ++ ++ // For all other factory implementation ++ return options_.table_factory->NewTableReader(t_opt, std::move(rep_->file_), ++ file_size, &(rep_->table_reader_)); +} + -+void write(SequenceNumber sequenceNumber, FILE* file) { ++Status RawSstFileReader::SetTableOptionsByMagicNumber( ++ uint64_t table_magic_number) { ++ assert(rep_->table_properties_); ++ if (table_magic_number == kBlockBasedTableMagicNumber || ++ table_magic_number == kLegacyBlockBasedTableMagicNumber) { ++ BlockBasedTableFactory* bbtf = new BlockBasedTableFactory(); ++ // To force tail prefetching, we fake reporting two useful reads of 512KB ++ // from the tail. ++ // It needs at least two data points to warm up the stats. ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ bbtf->tail_prefetch_stats()->RecordEffectiveSize(512 * 1024); ++ ++ options_.table_factory.reset(bbtf); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based\n"); ++ } ++ ++ auto& props = rep_->table_properties_->user_collected_properties; ++ auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); ++ if (pos != props.end()) { ++ auto index_type_on_file = static_cast( ++ DecodeFixed32(pos->second.c_str())); ++ if (index_type_on_file == ++ BlockBasedTableOptions::IndexType::kHashSearch) { ++ options_.prefix_extractor.reset(NewNoopTransform()); ++ } ++ } ++ } else if (table_magic_number == kPlainTableMagicNumber || ++ table_magic_number == kLegacyPlainTableMagicNumber) { ++ options_.allow_mmap_reads = true; + -+ char b[8]; -+ int idx = 7; -+ while (idx >= 0) { -+ b[idx] = sequenceNumber % 256; -+ sequenceNumber /= 256; -+ idx -= 1; ++ PlainTableOptions plain_table_options; ++ plain_table_options.user_key_len = kPlainTableVariableLength; ++ plain_table_options.bloom_bits_per_key = 0; ++ plain_table_options.hash_table_ratio = 0; ++ plain_table_options.index_sparseness = 1; ++ plain_table_options.huge_page_tlb_size = 0; ++ plain_table_options.encoding_type = kPlain; ++ plain_table_options.full_scan_mode = true; ++ ++ options_.table_factory.reset(NewPlainTableFactory(plain_table_options)); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: plain table\n"); ++ } ++ } else { ++ char error_msg_buffer[80]; ++ snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1, ++ "Unsupported table magic number --- %lx", ++ (long)table_magic_number); ++ return Status::InvalidArgument(error_msg_buffer); + } -+ fwrite(b, 8, 1, file); ++ ++ return Status::OK(); +} + -+void write(ParsedInternalKey &key, FILE* file) { -+ write(key.user_key, file); -+ write(key.sequence, file); -+ write(static_cast(key.type), file); ++Status RawSstFileReader::SetOldTableOptions() { ++ assert(rep_->table_properties_ == nullptr); ++ options_.table_factory = std::make_shared(); ++ if (!silent_) { ++ fprintf(stdout, "Sst file format: block-based(old version)\n"); ++ } ++ ++ return Status::OK(); +} + - Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, -- bool has_from, const std::string& from_key, -- bool has_to, const std::string& to_key, -+ bool has_from, const Slice& from_key, -+ bool has_to, const Slice& to_key, - bool use_from_as_prefix) { - if 
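On the Ozone side this reader is surfaced as ManagedRawSSTFileReader and ManagedRawSSTFileIterator, loaded through NativeLibraryLoader as the updated test below shows; the JNI glue itself is not part of this excerpt. Purely as a hypothetical illustration of the shape such a bridge could take, reusing the jlong-handle convention from the removed PipeInputStream code (GET_CPLUSPLUS_POINTER on a heap pointer, reinterpret_cast of the handle on the way back), a constructor and destructor pair might look like the sketch below. All function names here are invented for the example.

    // Hypothetical JNI shim sketch; the real binding shipped with this
    // commit is not shown in this hunk.
    #include <jni.h>
    #include <string>

    #include "cplusplus_to_java_convert.h"
    #include "rocksdb/options.h"
    #include "rocksdb/raw_sst_file_reader.h"

    extern "C" jlong exampleNewRawSstFileReader(JNIEnv* env, jclass,
                                                jlong optionsHandle,
                                                jstring jpath,
                                                jlong readaheadSize) {
      const char* path = env->GetStringUTFChars(jpath, nullptr);
      // Assumption: the jlong carries a rocksdb::Options*, as with other
      // managed RocksObject handles.
      auto* options =
          reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(optionsHandle);
      auto* reader = new ROCKSDB_NAMESPACE::RawSstFileReader(
          *options, std::string(path), static_cast<size_t>(readaheadSize),
          true /* verify_checksum */, true /* silent */);
      env->ReleaseStringUTFChars(jpath, path);
      return GET_CPLUSPLUS_POINTER(reader);  // opaque handle kept on the Java side
    }

    extern "C" void exampleDisposeRawSstFileReader(JNIEnv*, jclass,
                                                   jlong handle) {
      delete reinterpret_cast<ROCKSDB_NAMESPACE::RawSstFileReader*>(handle);
    }
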
(!table_reader_) { - return init_result_; -@@ -446,6 +490,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - /*arena=*/nullptr, /*skip_filters=*/false, - TableReaderCaller::kSSTDumpTool); - uint64_t i = 0; -+ - if (has_from) { - InternalKey ikey; - ikey.SetMinPossibleForUserKey(from_key); -@@ -453,6 +498,7 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - } else { - iter->SeekToFirst(); - } -+ - for (; iter->Valid(); iter->Next()) { - Slice key = iter->key(); - Slice value = iter->value(); -@@ -478,22 +524,19 @@ Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num, - - if (print_kv) { - if (!decode_blob_index_ || ikey.type != kTypeBlobIndex) { -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- value.ToString(output_hex_).c_str()); -+ write(ikey, out_); -+ write(value, out_); - } else { - BlobIndex blob_index; -- - const Status s = blob_index.DecodeFrom(value); - if (!s.ok()) { -- fprintf(stderr, "%s => error decoding blob index\n", -- ikey.DebugString(true, output_hex_).c_str()); -+ write(ikey, err_); -+ write("error decoding blob index", err_); - continue; - } -- -- fprintf(stdout, "%s => %s\n", -- ikey.DebugString(true, output_hex_).c_str(), -- blob_index.DebugString(output_hex_).c_str()); -+ write(ikey, out_); -+ std::string v = blob_index.DebugString(output_hex_); -+ write(v, out_); - } - } - } -diff --git a/table/sst_file_dumper.h b/table/sst_file_dumper.h -index 7be876390..768c5b1e2 100644 ---- a/table/sst_file_dumper.h -+++ b/table/sst_file_dumper.h -@@ -22,11 +22,13 @@ class SstFileDumper { - bool verify_checksum, bool output_hex, - bool decode_blob_index, - const EnvOptions& soptions = EnvOptions(), -- bool silent = false); -+ bool silent = false, -+ FILE* out = stdout, -+ FILE* err = stderr); - - Status ReadSequential(bool print_kv, uint64_t read_num, bool has_from, -- const std::string& from_key, bool has_to, -- const std::string& to_key, -+ const Slice& from_key, bool has_to, -+ const Slice& to_key, - bool use_from_as_prefix = false); - - Status ReadTableProperties( -@@ -94,6 +96,8 @@ class SstFileDumper { - ReadOptions read_options_; - InternalKeyComparator internal_comparator_; - std::unique_ptr table_properties_; -+ FILE* out_; -+ FILE* err_; - }; - - } // namespace ROCKSDB_NAMESPACE -diff --git a/tools/sst_dump_tool.cc b/tools/sst_dump_tool.cc -index 7053366e7..8f248ddf3 100644 ---- a/tools/sst_dump_tool.cc -+++ b/tools/sst_dump_tool.cc -@@ -31,7 +31,7 @@ static const std::vector> - - namespace { - --void print_help(bool to_stderr) { -+void print_help(bool to_stderr, FILE* err_, FILE* out_) { - std::string supported_compressions; - for (CompressionType ct : GetSupportedCompressions()) { - if (!supported_compressions.empty()) { -@@ -43,7 +43,7 @@ void print_help(bool to_stderr) { - supported_compressions += str; - } - fprintf( -- to_stderr ? stderr : stdout, -+ to_stderr ? 
err_ : out_, - R"(sst_dump --file= [--command=check|scan|raw|recompress|identify] - --file= - Path to SST file or directory containing SST files -@@ -149,7 +149,13 @@ bool ParseIntArg(const char* arg, const std::string arg_name, - } - } // namespace - --int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { -+Slice* AssignSlicePrependedWithLength(const char* buf) { -+ long val = std::stol(buf); -+ return reinterpret_cast(val); ++RawIterator* RawSstFileReader::newIterator( ++ bool has_from, Slice* from, bool has_to, Slice* to) { ++ InternalIterator* iter = rep_->table_reader_->NewIterator( ++ rep_->read_options_, rep_->moptions_.prefix_extractor.get(), ++ /*arena=*/nullptr, /*skip_filters=*/false, ++ TableReaderCaller::kSSTDumpTool); ++ return new RawSstFileIterator(iter, has_from, from, has_to, to); ++ +} ++} // namespace ROCKSDB_NAMESPACE + -+int SSTDumpTool::Run(int argc, char const* const* argv, Options options, -+ FILE* out, FILE* err) { - std::string env_uri, fs_uri; - const char* dir_or_file = nullptr; - uint64_t read_num = std::numeric_limits::max(); -@@ -170,8 +176,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - bool has_compression_level_from = false; - bool has_compression_level_to = false; - bool has_specified_compression_types = false; -- std::string from_key; -- std::string to_key; -+ bool silent = false; -+ Slice* from_key = nullptr; -+ Slice* to_key = nullptr; - std::string block_size_str; - std::string compression_level_from_str; - std::string compression_level_to_str; -@@ -197,7 +204,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - int64_t tmp_val; - - for (int i = 1; i < argc; i++) { -- if (strncmp(argv[i], "--env_uri=", 10) == 0) { -+ if (strncmp(argv[i], "--silent", 8) == 0) { -+ silent = true; -+ } else if (strncmp(argv[i], "--env_uri=", 10) == 0) { - env_uri = argv[i] + 10; - } else if (strncmp(argv[i], "--fs_uri=", 9) == 0) { - fs_uri = argv[i] + 9; -@@ -217,13 +226,13 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - } else if (strncmp(argv[i], "--command=", 10) == 0) { - command = argv[i] + 10; - } else if (strncmp(argv[i], "--from=", 7) == 0) { -- from_key = argv[i] + 7; -+ from_key = AssignSlicePrependedWithLength(argv[i] + 7); - has_from = true; - } else if (strncmp(argv[i], "--to=", 5) == 0) { -- to_key = argv[i] + 5; -+ to_key = AssignSlicePrependedWithLength(argv[i] + 5); - has_to = true; - } else if (strncmp(argv[i], "--prefix=", 9) == 0) { -- from_key = argv[i] + 9; -+ from_key = AssignSlicePrependedWithLength( argv[i] + 9); - use_from_as_prefix = true; - } else if (strcmp(argv[i], "--show_properties") == 0) { - show_properties = true; -@@ -273,7 +282,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - std::cerr << pik_status.getState() << "\n"; - retc = -1; - } -- fprintf(stdout, "key=%s\n", ikey.DebugString(true, true).c_str()); -+ fprintf(out, "key=%s\n", ikey.DebugString(true, true).c_str()); - return retc; - } else if (ParseIntArg(argv[i], "--compression_level_from=", - "compression_level_from must be numeric", -@@ -288,9 +297,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, "compression_max_dict_bytes must be a uint32_t: '%s'\n", -+ fprintf(err, "compression_max_dict_bytes must be a uint32_t: '%s'\n", - argv[i]); -- 
print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_bytes = static_cast(tmp_val); -@@ -298,10 +307,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_zstd_max_train_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0 || tmp_val > std::numeric_limits::max()) { -- fprintf(stderr, -+ fprintf(err, - "compression_zstd_max_train_bytes must be a uint32_t: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_zstd_max_train_bytes = static_cast(tmp_val); -@@ -309,56 +318,56 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - "compression_max_dict_buffer_bytes must be numeric", - &tmp_val)) { - if (tmp_val < 0) { -- fprintf(stderr, -+ fprintf(err, - "compression_max_dict_buffer_bytes must be positive: '%s'\n", - argv[i]); -- print_help(/*to_stderr*/ true); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - compression_max_dict_buffer_bytes = static_cast(tmp_val); - } else if (strcmp(argv[i], "--compression_use_zstd_finalize_dict") == 0) { - compression_use_zstd_finalize_dict = true; - } else if (strcmp(argv[i], "--help") == 0) { -- print_help(/*to_stderr*/ false); -+ print_help(/*to_stderr*/ false, err, out); - return 0; - } else if (strcmp(argv[i], "--version") == 0) { - printf("%s\n", GetRocksBuildInfoAsString("sst_dump").c_str()); - return 0; - } else { -- fprintf(stderr, "Unrecognized argument '%s'\n\n", argv[i]); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "Unrecognized argument '%s'\n\n", argv[i]); -+ print_help(/*to_stderr*/ true, err, out); - return 1; - } - } - - if(has_compression_level_from && has_compression_level_to) { - if(!has_specified_compression_types || compression_types.size() != 1) { -- fprintf(stderr, "Specify one compression type.\n\n"); -+ fprintf(err, "Specify one compression type.\n\n"); - exit(1); - } - } else if(has_compression_level_from || has_compression_level_to) { -- fprintf(stderr, "Specify both --compression_level_from and " -+ fprintf(err, "Specify both --compression_level_from and " - "--compression_level_to.\n\n"); - exit(1); - } - - if (use_from_as_prefix && has_from) { -- fprintf(stderr, "Cannot specify --prefix and --from\n\n"); -+ fprintf(err, "Cannot specify --prefix and --from\n\n"); - exit(1); - } - - if (input_key_hex) { - if (has_from || use_from_as_prefix) { -- from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key); -+ *from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key -> ToString()); - } - if (has_to) { -- to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key); -+ *to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key->ToString()); - } - } - - if (dir_or_file == nullptr) { -- fprintf(stderr, "file or directory must be specified.\n\n"); -- print_help(/*to_stderr*/ true); -+ fprintf(err, "file or directory must be specified.\n\n"); -+ print_help(/*to_stderr*/ true, err, out); - exit(1); - } - -@@ -373,10 +382,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = Env::CreateFromUri(config_options, env_uri, fs_uri, &options.env, - &env_guard); - if (!s.ok()) { -- fprintf(stderr, "CreateEnvFromUri: %s\n", s.ToString().c_str()); -+ fprintf(err, "CreateEnvFromUri: %s\n", s.ToString().c_str()); - exit(1); -- } else { -- fprintf(stdout, "options.env is %p\n", options.env); -+ } else if (!silent){ -+ fprintf(out, "options.env is %p\n", options.env); - } - } 
- -@@ -390,7 +399,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - Status s = env->FileExists(dir_or_file); - // dir_or_file does not exist - if (!s.ok()) { -- fprintf(stderr, "%s%s: No such file or directory\n", s.ToString().c_str(), -+ fprintf(err, "%s%s: No such file or directory\n", s.ToString().c_str(), - dir_or_file); - return 1; - } -@@ -421,10 +430,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - ROCKSDB_NAMESPACE::SstFileDumper dumper( - options, filename, Temperature::kUnknown, readahead_size, -- verify_checksum, output_hex, decode_blob_index); -+ verify_checksum, output_hex, decode_blob_index, EnvOptions(), -+ silent, out, err); - // Not a valid SST - if (!dumper.getStatus().ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - dumper.getStatus().ToString().c_str()); - continue; - } else { -@@ -433,10 +443,11 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // where there is at least one valid SST - if (valid_sst_files.size() == 1) { - // from_key and to_key are only used for "check", "scan", or "" -- if (command == "check" || command == "scan" || command == "") { -- fprintf(stdout, "from [%s] to [%s]\n", -- ROCKSDB_NAMESPACE::Slice(from_key).ToString(true).c_str(), -- ROCKSDB_NAMESPACE::Slice(to_key).ToString(true).c_str()); -+ if (!silent && (command == "check" || command == "scan" || -+ command == "")) { -+ fprintf(out, "from [%s] to [%s]\n", -+ from_key->ToString(true).c_str(), -+ to_key->ToString(true).c_str()); - } - } - } -@@ -449,7 +460,7 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - compression_zstd_max_train_bytes, compression_max_dict_buffer_bytes, - !compression_use_zstd_finalize_dict); - if (!st.ok()) { -- fprintf(stderr, "Failed to recompress: %s\n", st.ToString().c_str()); -+ fprintf(err, "Failed to recompress: %s\n", st.ToString().c_str()); - exit(1); - } - return 0; -@@ -461,10 +472,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - - st = dumper.DumpTable(out_filename); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); - exit(1); - } else { -- fprintf(stdout, "raw dump written to file %s\n", &out_filename[0]); -+ fprintf(out, "raw dump written to file %s\n", &out_filename[0]); - } - continue; - } -@@ -473,10 +484,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "" || command == "scan" || command == "check") { - st = dumper.ReadSequential( - command == "scan", read_num > 0 ? 
(read_num - total_read) : read_num, -- has_from || use_from_as_prefix, from_key, has_to, to_key, -+ has_from || use_from_as_prefix, *from_key, has_to, *to_key, - use_from_as_prefix); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), -+ fprintf(err, "%s: %s\n", filename.c_str(), - st.ToString().c_str()); - } - total_read += dumper.GetReadNumber(); -@@ -488,10 +499,10 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - if (command == "verify") { - st = dumper.VerifyChecksum(); - if (!st.ok()) { -- fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(), -+ fprintf(err, "%s is corrupted: %s\n", filename.c_str(), - st.ToString().c_str()); - } else { -- fprintf(stdout, "The file is ok\n"); -+ fprintf(out, "The file is ok\n"); - } - continue; - } -@@ -503,15 +514,15 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - table_properties_from_reader; - st = dumper.ReadTableProperties(&table_properties_from_reader); - if (!st.ok()) { -- fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -- fprintf(stderr, "Try to use initial table properties\n"); -+ fprintf(err, "%s: %s\n", filename.c_str(), st.ToString().c_str()); -+ fprintf(err, "Try to use initial table properties\n"); - table_properties = dumper.GetInitTableProperties(); - } else { - table_properties = table_properties_from_reader.get(); - } - if (table_properties != nullptr) { - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Table Properties:\n" - "------------------------------\n" - " %s", -@@ -523,18 +534,18 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - total_index_block_size += table_properties->index_size; - total_filter_block_size += table_properties->filter_size; - if (show_properties) { -- fprintf(stdout, -+ fprintf(out, - "Raw user collected properties\n" - "------------------------------\n"); - for (const auto& kv : table_properties->user_collected_properties) { - std::string prop_name = kv.first; - std::string prop_val = Slice(kv.second).ToString(true); -- fprintf(stdout, " # %s: 0x%s\n", prop_name.c_str(), -+ fprintf(out, " # %s: 0x%s\n", prop_name.c_str(), - prop_val.c_str()); - } - } - } else { -- fprintf(stderr, "Reader unexpectedly returned null properties\n"); -+ fprintf(err, "Reader unexpectedly returned null properties\n"); - } - } - } -@@ -555,9 +566,9 @@ int SSTDumpTool::Run(int argc, char const* const* argv, Options options) { - // Exit with an error state - if (dir) { - fprintf(stdout, "------------------------------\n"); -- fprintf(stderr, "No valid SST files found in %s\n", dir_or_file); -+ fprintf(err, "No valid SST files found in %s\n", dir_or_file); - } else { -- fprintf(stderr, "%s is not a valid SST file\n", dir_or_file); -+ fprintf(err, "%s is not a valid SST file\n", dir_or_file); - } - return 1; - } else { ++#endif // ROCKSDB_LITE diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java index 8fc4e83e7a1d..8f18f8d1e423 100644 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/TestNativeLibraryLoader.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.utils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.ozone.test.tag.Native; import 
org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; @@ -56,37 +57,27 @@ private static Stream nativeLibraryDirectoryLocations() { @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @MethodSource("nativeLibraryDirectoryLocations") - public void testNativeLibraryLoader( - String nativeLibraryDirectoryLocation) { + public void testNativeLibraryLoader(String nativeLibraryDirectoryLocation) throws NativeLibraryNotLoadedException { Map libraryLoadedMap = new HashMap<>(); NativeLibraryLoader loader = new NativeLibraryLoader(libraryLoadedMap); - try (MockedStatic mockedNativeLibraryLoader = - mockStatic(NativeLibraryLoader.class, - CALLS_REAL_METHODS)) { - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) + try (MockedStatic mockedNativeLibraryLoader = mockStatic(NativeLibraryLoader.class, + CALLS_REAL_METHODS)) { + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getSystemProperty(same(NATIVE_LIB_TMP_DIR))) .thenReturn(nativeLibraryDirectoryLocation); - mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()) - .thenReturn(loader); - assertTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - assertTrue(NativeLibraryLoader - .isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getInstance()).thenReturn(loader); + ManagedRawSSTFileReader.loadLibrary(); + assertTrue(NativeLibraryLoader.isLibraryLoaded(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); // Mocking to force copy random bytes to create a lib file to // nativeLibraryDirectoryLocation. But load library will fail. - mockedNativeLibraryLoader.when(() -> - NativeLibraryLoader.getResourceStream(anyString())) + mockedNativeLibraryLoader.when(() -> NativeLibraryLoader.getResourceStream(anyString())) .thenReturn(new ByteArrayInputStream(new byte[]{0, 1, 2, 3})); String dummyLibraryName = "dummy_lib"; NativeLibraryLoader.getInstance().loadLibrary(dummyLibraryName); NativeLibraryLoader.isLibraryLoaded(dummyLibraryName); // Checking if the resource with random was copied to a temp file. - File[] libPath = - new File(nativeLibraryDirectoryLocation == null ? "" : - nativeLibraryDirectoryLocation) - .getAbsoluteFile().listFiles((dir, name) -> - name.startsWith(dummyLibraryName) && - name.endsWith(NativeLibraryLoader.getLibOsSuffix())); + File[] libPath = new File(nativeLibraryDirectoryLocation == null ? "" : nativeLibraryDirectoryLocation) + .getAbsoluteFile().listFiles((dir, name) -> name.startsWith(dummyLibraryName) && + name.endsWith(NativeLibraryLoader.getLibOsSuffix())); assertNotNull(libPath); assertEquals(1, libPath.length); assertTrue(libPath[0].delete()); diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java new file mode 100644 index 000000000000..00816e60d7f2 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedRawSSTFileIterator.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.utils.db.managed; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.TestUtils; +import org.apache.ozone.test.tag.Native; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Named; +import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.TreeMap; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test for ManagedRawSSTFileReaderIterator. + */ +@Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) +class TestManagedRawSSTFileIterator { + + @TempDir + private Path tempDir; + + private File createSSTFileWithKeys( + TreeMap, String> keys) throws Exception { + File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); + try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); + ManagedOptions managedOptions = new ManagedOptions(); + ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter(envOptions, managedOptions)) { + sstFileWriter.open(file.getAbsolutePath()); + for (Map.Entry, String> entry : keys.entrySet()) { + if (entry.getKey().getValue() == 0) { + sstFileWriter.delete(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8)); + } else { + sstFileWriter.put(entry.getKey().getKey().getBytes(StandardCharsets.UTF_8), + entry.getValue().getBytes(StandardCharsets.UTF_8)); + } + } + sstFileWriter.finish(); + } + return file; + } + + private static Stream keyValueFormatArgs() { + return Stream.of(Arguments.of(Named.of("Key starting with a single quote", "'key%1$d=>"), + Named.of("Value starting with a number ending with a single quote", "%1$dvalue'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number", "%1$dvalue%1$d")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key starting with a single quote & ending with a number", "'key%1$d"), + Named.of("Value starting & ending with a number & elosed within quotes", "%1$d'value%1$d'")), + Arguments.of(Named.of("Key ending with a number", "key%1$d"), + Named.of("Value starting & ending with a number & containing null character & new line character", + "%1$dvalue\n\0%1$d")), + Arguments.of(Named.of("Key ending with a number & containing a null character", "key\0%1$d"), + Named.of("Value starting & ending with a number 
& elosed within quotes", "%1$dvalue\r%1$d"))); + } + + @BeforeAll + public static void init() throws NativeLibraryNotLoadedException { + ManagedRawSSTFileReader.loadLibrary(); + } + + + @ParameterizedTest + @MethodSource("keyValueFormatArgs") + public void testSSTDumpIteratorWithKeyFormat(String keyFormat, String valueFormat) throws Exception { + TreeMap, String> keys = IntStream.range(0, 100).boxed().collect(Collectors.toMap( + i -> Pair.of(String.format(keyFormat, i), i % 2), + i -> i % 2 == 0 ? "" : String.format(valueFormat, i), + (v1, v2) -> v2, + TreeMap::new)); + File file = createSSTFileWithKeys(keys); + try (ManagedOptions options = new ManagedOptions(); + ManagedRawSSTFileReader reader = new ManagedRawSSTFileReader<>( + options, file.getAbsolutePath(), 2 * 1024 * 1024)) { + List> testBounds = TestUtils.getTestingBounds(keys.keySet().stream() + .collect(Collectors.toMap(Pair::getKey, Pair::getValue, (v1, v2) -> v1, TreeMap::new))); + for (Optional keyStart : testBounds) { + for (Optional keyEnd : testBounds) { + Map, String> expectedKeys = keys.entrySet().stream() + .filter(e -> keyStart.map(s -> e.getKey().getKey().compareTo(s) >= 0).orElse(true)) + .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0).orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + Optional lowerBound = keyStart.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + Optional upperBound = keyEnd.map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); + try (ManagedRawSSTFileIterator iterator + = reader.newIterator(Function.identity(), lowerBound.orElse(null), upperBound.orElse(null))) { + while (iterator.hasNext()) { + ManagedRawSSTFileIterator.KeyValue r = iterator.next(); + String key = StringUtils.bytes2String(r.getKey()); + Pair recordKey = Pair.of(key, r.getType()); + assertThat(expectedKeys).containsKey(recordKey); + assertEquals(Optional.ofNullable(expectedKeys.get(recordKey)).orElse(""), + StringUtils.bytes2String(r.getValue())); + expectedKeys.remove(recordKey); + } + assertEquals(0, expectedKeys.size()); + } finally { + lowerBound.ifPresent(ManagedSlice::close); + upperBound.ifPresent(ManagedSlice::close); + } + } + } + } + } +} diff --git a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java deleted file mode 100644 index d2796c19fc50..000000000000 --- a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.utils.db.managed; - -import com.google.common.primitives.Bytes; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; -import org.apache.hadoop.hdds.utils.TestUtils; -import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; -import org.junit.jupiter.api.Named; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.TreeMap; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.Stream; - -import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assumptions.assumeTrue; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -/** - * Test for ManagedSSTDumpIterator. 
- */ -class TestManagedSSTDumpIterator { - - @TempDir - private Path tempDir; - - private File createSSTFileWithKeys( - TreeMap, String> keys) throws Exception { - File file = Files.createFile(tempDir.resolve("tmp_sst_file.sst")).toFile(); - try (ManagedEnvOptions envOptions = new ManagedEnvOptions(); - ManagedOptions managedOptions = new ManagedOptions(); - ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter( - envOptions, managedOptions)) { - sstFileWriter.open(file.getAbsolutePath()); - for (Map.Entry, String> entry : keys.entrySet()) { - if (entry.getKey().getValue() == 0) { - sstFileWriter.delete(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8)); - } else { - sstFileWriter.put(entry.getKey().getKey() - .getBytes(StandardCharsets.UTF_8), - entry.getValue().getBytes(StandardCharsets.UTF_8)); - } - } - sstFileWriter.finish(); - } - return file; - } - - private static Stream keyValueFormatArgs() { - return Stream.of( - Arguments.of( - Named.of("Key starting with a single quote", - "'key%1$d=>"), - Named.of("Value starting with a number ending with a" + - " single quote", "%1$dvalue'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number", "%1$dvalue%1$d") - ), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'")), - Arguments.of( - Named.of("Key starting with a single quote & ending" + - " with a number", "'key%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$d'value%1$d'") - ), - Arguments.of( - Named.of("Key ending with a number", "key%1$d"), - Named.of("Value starting & ending with a number " + - "& containing null character & new line character", - "%1$dvalue\n\0%1$d") - ), - Arguments.of( - Named.of("Key ending with a number & containing" + - " a null character", "key\0%1$d"), - Named.of("Value starting & ending with a number " + - "& elosed within quotes", "%1$dvalue\r%1$d") - ) - ); - } - - private static byte[] getBytes(Integer val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(4); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putInt(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(Long val) { - ByteBuffer destByteBuffer = ByteBuffer.allocate(8); - destByteBuffer.order(ByteOrder.BIG_ENDIAN); - destByteBuffer.putLong(val); - return destByteBuffer.array(); - } - - private static byte[] getBytes(String val) { - byte[] b = new byte[val.length()]; - for (int i = 0; i < val.length(); i++) { - b[i] = (byte) val.charAt(i); - } - return b; - } - - private static Stream invalidPipeInputStreamBytes() { - return Stream.of( - Arguments.of(Named.of("Invalid 3 byte integer", - new byte[]{0, 0, 0})), - Arguments.of(Named.of("Invalid 2 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid 1 byte integer", - new byte[]{0, 0})), - Arguments.of(Named.of("Invalid key name length", - Bytes.concat(getBytes(4), getBytes("key")))), - Arguments.of(Named.of("Invalid Unsigned Long length", - Bytes.concat(getBytes(4), getBytes("key1"), - new byte[]{0, 0}))), - Arguments.of(Named.of("Invalid Sequence number", - Bytes.concat(getBytes(4), getBytes("key1")))), - Arguments.of(Named.of("Invalid Type", - Bytes.concat(getBytes(4), getBytes("key1"), - getBytes(4L)))), - Arguments.of(Named.of("Invalid Value", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), 
getBytes(0)))), - Arguments.of(Named.of("Invalid Value length", - Bytes.concat(getBytes(4), getBytes("key"), - getBytes(4L), getBytes(1), getBytes(6), - getBytes("val")))) - ); - } - - @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) - @ParameterizedTest - @MethodSource("keyValueFormatArgs") - @Unhealthy("HDDS-9274") - public void testSSTDumpIteratorWithKeyFormat(String keyFormat, - String valueFormat) - throws Exception { - assumeTrue(NativeLibraryLoader.getInstance().loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); - - TreeMap, String> keys = - IntStream.range(0, 100).boxed().collect( - Collectors.toMap( - i -> Pair.of(String.format(keyFormat, i), i % 2), - i -> i % 2 == 0 ? "" : String.format(valueFormat, i), - (v1, v2) -> v2, - TreeMap::new)); - File file = createSSTFileWithKeys(keys); - ExecutorService executorService = - new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, - new ArrayBlockingQueue<>(1), - new ThreadPoolExecutor.CallerRunsPolicy()); - ManagedSSTDumpTool tool = new ManagedSSTDumpTool(executorService, 8192); - List> testBounds = TestUtils.getTestingBounds( - keys.keySet().stream().collect(Collectors.toMap(Pair::getKey, - Pair::getValue, (v1, v2) -> v1, TreeMap::new))); - for (Optional keyStart : testBounds) { - for (Optional keyEnd : testBounds) { - Map, String> expectedKeys = keys.entrySet() - .stream().filter(e -> keyStart.map(s -> e.getKey().getKey() - .compareTo(s) >= 0).orElse(true)) - .filter(e -> keyEnd.map(s -> e.getKey().getKey().compareTo(s) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - Optional lowerBound = keyStart - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - Optional upperBound = keyEnd - .map(s -> new ManagedSlice(StringUtils.string2Bytes(s))); - try (ManagedOptions options = new ManagedOptions(); - ManagedSSTDumpIterator iterator = - new ManagedSSTDumpIterator(tool, - file.getAbsolutePath(), options, lowerBound.orElse(null), - upperBound.orElse(null)) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - } - ) { - while (iterator.hasNext()) { - ManagedSSTDumpIterator.KeyValue r = iterator.next(); - String key = new String(r.getKey(), StandardCharsets.UTF_8); - Pair recordKey = Pair.of(key, r.getType()); - assertThat(expectedKeys).containsKey(recordKey); - assertEquals(Optional.ofNullable(expectedKeys - .get(recordKey)).orElse(""), - new String(r.getValue(), StandardCharsets.UTF_8)); - expectedKeys.remove(recordKey); - } - assertEquals(0, expectedKeys.size()); - } finally { - lowerBound.ifPresent(ManagedSlice::close); - upperBound.ifPresent(ManagedSlice::close); - } - } - } - executorService.shutdown(); - } - - - @ParameterizedTest - @MethodSource("invalidPipeInputStreamBytes") - public void testInvalidSSTDumpIteratorWithKeyFormat(byte[] inputBytes) - throws ExecutionException, - InterruptedException, IOException { - ByteArrayInputStream byteArrayInputStream = - new ByteArrayInputStream(inputBytes); - ManagedSSTDumpTool tool = mock(ManagedSSTDumpTool.class); - File file = Files.createFile(tempDir.resolve("tmp_file.sst")).toFile(); - Future future = mock(Future.class); - when(future.isDone()).thenReturn(false); - when(future.get()).thenReturn(0); - when(tool.run(any(Map.class), - any(ManagedOptions.class))) - .thenReturn(new ManagedSSTDumpTool.SSTDumpToolTask(future, - byteArrayInputStream)); - try (ManagedOptions options = new ManagedOptions()) { - assertThrows(IllegalStateException.class, - () -> new ManagedSSTDumpIterator( - tool, 
file.getAbsolutePath(), options) { - @Override - protected KeyValue getTransformedValue( - Optional value) { - return value.orElse(null); - } - }); - } - } -} diff --git a/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties new file mode 100644 index 000000000000..959da047fb7f --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/auditlog.properties @@ -0,0 +1,76 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with this +# work for additional information regarding copyright ownership. The ASF +# licenses this file to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +#
+# http://www.apache.org/licenses/LICENSE-2.0 +#
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +name=PropertiesConfig + +# Checks for config change periodically and reloads +monitorInterval=5 + +filter=read, write +# filter.read.onMatch = DENY avoids logging all READ events +# filter.read.onMatch = ACCEPT permits logging all READ events +# The above two settings ignore the log levels in configuration +# filter.read.onMatch = NEUTRAL permits logging of only those READ events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.read.type = MarkerFilter +filter.read.marker = READ +filter.read.onMatch = NEUTRAL +filter.read.onMismatch = NEUTRAL + +# filter.write.onMatch = DENY avoids logging all WRITE events +# filter.write.onMatch = ACCEPT permits logging all WRITE events +# The above two settings ignore the log levels in configuration +# filter.write.onMatch = NEUTRAL permits logging of only those WRITE events +# which are attempted at log level equal or greater than log level specified +# in the configuration +filter.write.type = MarkerFilter +filter.write.marker = WRITE +filter.write.onMatch = NEUTRAL +filter.write.onMismatch = NEUTRAL + +# Log Levels are organized from most specific to least: +# OFF (most specific, no logging) +# FATAL (most specific, little data) +# ERROR +# WARN +# INFO +# DEBUG +# TRACE (least specific, a lot of data) +# ALL (least specific, all data) + +appenders = console, audit +appender.console.type = Console +appender.console.name = STDOUT +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %-5level | %c{1} | %msg%n + +appender.audit.type = File +appender.audit.name = AUDITLOG +appender.audit.fileName=audit.log +appender.audit.layout.type=PatternLayout +appender.audit.layout.pattern= %-5level | %c{1} | %C | %msg%n + +loggers=audit +logger.audit.type=AsyncLogger +logger.audit.name=OMAudit +logger.audit.level = INFO +logger.audit.appenderRefs = audit +logger.audit.appenderRef.file.ref = AUDITLOG + +rootLogger.level = INFO +rootLogger.appenderRefs = stdout +rootLogger.appenderRef.stdout.ref = STDOUT diff --git a/hadoop-hdds/rocks-native/src/test/resources/log4j.properties b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties new file mode 100644 index 000000000000..398786689af3 --- /dev/null +++ b/hadoop-hdds/rocks-native/src/test/resources/log4j.properties @@ -0,0 +1,23 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
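The MarkerFilter entries above match on the READ and WRITE markers attached to each audit event. As a rough illustration only (this uses the plain Log4j2 API, not Ozone's actual audit facade; the class name is made up, and the logger name simply reuses the OMAudit name configured above), emitting marked events looks roughly like this:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.Marker;
    import org.apache.logging.log4j.MarkerManager;

    public class AuditMarkerSketch {
      private static final Logger AUDIT = LogManager.getLogger("OMAudit");
      private static final Marker READ = MarkerManager.getMarker("READ");
      private static final Marker WRITE = MarkerManager.getMarker("WRITE");

      public static void main(String[] args) {
        // With onMatch = NEUTRAL both events still pass the normal INFO level
        // check; switching filter.read.onMatch to DENY would drop the READ
        // event before it reaches the console or file appender.
        AUDIT.info(READ, "user=alice | op=READ_KEY | ret=SUCCESS");
        AUDIT.info(WRITE, "user=alice | op=CREATE_KEY | ret=SUCCESS");
      }
    }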
+# +# log4j configuration used during build and unit tests + +log4j.rootLogger=INFO,stdout +log4j.threshold=ALL +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java index be949cd4fbdd..913eeb73384a 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/SstFileSetReader.java @@ -20,12 +20,12 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedSlice; import org.apache.hadoop.util.ClosableIterator; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedReadOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpIterator; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; import org.rocksdb.SstFileReader; @@ -37,9 +37,9 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; -import java.util.Optional; import java.util.Spliterator; import java.util.Spliterators; +import java.util.function.Function; import java.util.stream.Stream; import java.util.stream.StreamSupport; @@ -91,111 +91,96 @@ public long getEstimatedTotalKeys() throws RocksDBException { } public Stream getKeyStream(String lowerBound, - String upperBound) throws RocksDBException { + String upperBound) throws RocksDBException { // TODO: [SNAPSHOT] Check if default Options and ReadOptions is enough. 
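For orientation, a caller-side sketch of getKeyStream (file paths and key bounds are made up for illustration): keys are streamed from the whole SST file set within the half-open range [lowerBound, upperBound), passing null for a bound leaves that side open, and tombstoned keys are not surfaced by this variant.

    import java.util.Arrays;
    import java.util.stream.Stream;
    import org.apache.ozone.rocksdb.util.SstFileSetReader;
    import org.rocksdb.RocksDBException;

    class KeyStreamSketch {
      static void printKeysInRange() throws RocksDBException {
        SstFileSetReader reader =
            new SstFileSetReader(Arrays.asList("/data/a.sst", "/data/b.sst"));
        try (Stream<String> keys = reader.getKeyStream("key000", "key100")) {
          keys.forEach(System.out::println);   // keys in [lowerBound, upperBound) only
        }
      }
    }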
- final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - private ManagedOptions options; - private ReadOptions readOptions; - - private ManagedSlice lowerBoundSLice; - - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - this.readOptions = new ManagedReadOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSLice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - readOptions.setIterateLowerBound(lowerBoundSLice); - } - - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - readOptions.setIterateUpperBound(upperBoundSlice); - } - } + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + private ManagedOptions options; + private ReadOptions readOptions; + + private ManagedSlice lowerBoundSLice; + + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + this.readOptions = new ManagedReadOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSLice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + readOptions.setIterateLowerBound(lowerBoundSLice); + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException { - return new ManagedSstFileIterator(file, options, readOptions) { - @Override - protected String getIteratorValue( - SstFileReaderIterator iterator) { - return new String(iterator.key(), UTF_8); - } - }; - } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + readOptions.setIterateUpperBound(upperBoundSlice); + } + } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) throws RocksDBException { + return new ManagedSstFileIterator(file, options, readOptions) { @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - readOptions.close(); - IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + protected String getIteratorValue( + SstFileReaderIterator iterator) { + return new String(iterator.key(), UTF_8); } }; + } + + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + readOptions.close(); + IOUtils.closeQuietly(lowerBoundSLice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - public Stream getKeyStreamWithTombstone( - ManagedSSTDumpTool sstDumpTool, String lowerBound, - String upperBound) throws RocksDBException { - final MultipleSstFileIterator itr = - new MultipleSstFileIterator(sstFiles) { - //TODO: [SNAPSHOT] Check if default Options is enough. - private ManagedOptions options; - private ManagedSlice lowerBoundSlice; - private ManagedSlice upperBoundSlice; - - @Override - protected void init() { - this.options = new ManagedOptions(); - if (Objects.nonNull(lowerBound)) { - this.lowerBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(lowerBound)); - } - if (Objects.nonNull(upperBound)) { - this.upperBoundSlice = new ManagedSlice( - StringUtils.string2Bytes(upperBound)); - } - } + public Stream getKeyStreamWithTombstone(String lowerBound, String upperBound) throws RocksDBException { + final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { + //TODO: [SNAPSHOT] Check if default Options is enough. 
+ private ManagedOptions options; + private ManagedSlice lowerBoundSlice; + private ManagedSlice upperBoundSlice; + + @Override + protected void init() { + this.options = new ManagedOptions(); + if (Objects.nonNull(lowerBound)) { + this.lowerBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(lowerBound)); + } + if (Objects.nonNull(upperBound)) { + this.upperBoundSlice = new ManagedSlice( + StringUtils.string2Bytes(upperBound)); + } + } - @Override - protected ClosableIterator getKeyIteratorForFile(String file) - throws IOException { - return new ManagedSSTDumpIterator(sstDumpTool, file, - options, lowerBoundSlice, upperBoundSlice) { - @Override - protected String getTransformedValue(Optional value) { - return value.map(v -> StringUtils.bytes2String(v.getKey())) - .orElse(null); - } - }; - } + @Override + protected ClosableIterator getKeyIteratorForFile(String file) { + return new ManagedRawSstFileIterator(file, options, lowerBoundSlice, upperBoundSlice, + keyValue -> StringUtils.bytes2String(keyValue.getKey())); + } - @Override - public void close() throws UncheckedIOException { - super.close(); - options.close(); - IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); - } - }; + @Override + public void close() throws UncheckedIOException { + super.close(); + options.close(); + IOUtils.closeQuietly(lowerBoundSlice, upperBoundSlice); + } + }; return getStreamFromIterator(itr); } - private abstract static class ManagedSstFileIterator implements - ClosableIterator { - private SstFileReader fileReader; - private SstFileReaderIterator fileReaderIterator; + private abstract static class ManagedSstFileIterator implements ClosableIterator { + private final SstFileReader fileReader; + private final SstFileReaderIterator fileReaderIterator; - ManagedSstFileIterator(String path, ManagedOptions options, - ReadOptions readOptions) - throws RocksDBException { + ManagedSstFileIterator(String path, ManagedOptions options, ReadOptions readOptions) throws RocksDBException { this.fileReader = new SstFileReader(options); this.fileReader.open(path); this.fileReaderIterator = fileReader.newIterator(readOptions); @@ -223,8 +208,35 @@ public String next() { } } - private abstract static class MultipleSstFileIterator implements - ClosableIterator { + private static class ManagedRawSstFileIterator implements ClosableIterator { + private final ManagedRawSSTFileReader fileReader; + private final ManagedRawSSTFileIterator fileReaderIterator; + private static final int READ_AHEAD_SIZE = 2 * 1024 * 1024; + + ManagedRawSstFileIterator(String path, ManagedOptions options, ManagedSlice lowerBound, ManagedSlice upperBound, + Function keyValueFunction) { + this.fileReader = new ManagedRawSSTFileReader<>(options, path, READ_AHEAD_SIZE); + this.fileReaderIterator = fileReader.newIterator(keyValueFunction, lowerBound, upperBound); + } + + @Override + public void close() { + this.fileReaderIterator.close(); + this.fileReader.close(); + } + + @Override + public boolean hasNext() { + return fileReaderIterator.hasNext(); + } + + @Override + public String next() { + return fileReaderIterator.next(); + } + } + + private abstract static class MultipleSstFileIterator implements ClosableIterator { private final Iterator fileNameIterator; @@ -238,16 +250,13 @@ private MultipleSstFileIterator(Collection files) { protected abstract void init(); - protected abstract ClosableIterator getKeyIteratorForFile(String file) - throws RocksDBException, - IOException; + protected abstract ClosableIterator getKeyIteratorForFile(String file) 
throws RocksDBException, IOException; @Override public boolean hasNext() { try { do { - if (Objects.nonNull(currentFileIterator) && - currentFileIterator.hasNext()) { + if (Objects.nonNull(currentFileIterator) && currentFileIterator.hasNext()) { return true; } } while (moveToNextFile()); diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java index edc491e7c8da..1031992f3b5d 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestSstFileSetReader.java @@ -17,18 +17,15 @@ */ package org.apache.ozone.rocksdb.util; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.TestUtils; import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -42,10 +39,6 @@ import java.util.Optional; import java.util.SortedMap; import java.util.TreeMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.SynchronousQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -164,51 +157,38 @@ public void testGetKeyStream(int numberOfFiles) @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @ParameterizedTest @ValueSource(ints = {0, 1, 2, 3, 7, 10}) - @Unhealthy("HDDS-9274") public void testGetKeyStreamWithTombstone(int numberOfFiles) throws RocksDBException, IOException, NativeLibraryNotLoadedException { - assumeTrue(NativeLibraryLoader.getInstance() - .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME)); + assumeTrue(ManagedRawSSTFileReader.loadLibrary()); Pair, List> data = createDummyData(numberOfFiles); List files = data.getRight(); SortedMap keys = data.getLeft(); - ExecutorService executorService = new ThreadPoolExecutor(0, - 2, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), new ThreadPoolExecutor.DiscardPolicy()); - ManagedSSTDumpTool sstDumpTool = - new ManagedSSTDumpTool(executorService, 256); // Getting every possible combination of 2 elements from the sampled keys. // Reading the sst file lying within the given bounds and // validating the keys read from the sst file. List> bounds = TestUtils.getTestingBounds(keys); - try { - for (Optional lowerBound : bounds) { - for (Optional upperBound : bounds) { - // Calculating the expected keys which lie in the given boundary. 
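With the ManagedSSTDumpTool and its executor removed, the tombstone-aware variant only needs the native reader library. A minimal sketch of the new call shape, mirroring what testGetKeyStreamWithTombstone now does (the files list of SST paths is assumed to be prepared by the test):

    // loadLibrary() may throw NativeLibraryNotLoadedException; the test skips
    // itself when the native rocks_tools library is not available.
    assumeTrue(ManagedRawSSTFileReader.loadLibrary());

    try (Stream<String> keys = new SstFileSetReader(files)
        .getKeyStreamWithTombstone(null, null)) {   // null bounds: full range, deleted keys included
      keys.forEach(System.out::println);
    }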
- Map keysInBoundary = - keys.entrySet().stream().filter(entry -> lowerBound - .map(l -> entry.getKey().compareTo(l) >= 0) - .orElse(true) && - upperBound.map(u -> entry.getKey().compareTo(u) < 0) - .orElse(true)) - .collect(Collectors.toMap(Map.Entry::getKey, - Map.Entry::getValue)); - try (Stream keyStream = new SstFileSetReader(files) - .getKeyStreamWithTombstone(sstDumpTool, lowerBound.orElse(null), - upperBound.orElse(null))) { - keyStream.forEach( - key -> { - assertNotNull(keysInBoundary.remove(key)); - }); - } - assertEquals(0, keysInBoundary.size()); + for (Optional lowerBound : bounds) { + for (Optional upperBound : bounds) { + // Calculating the expected keys which lie in the given boundary. + Map keysInBoundary = + keys.entrySet().stream().filter(entry -> lowerBound + .map(l -> entry.getKey().compareTo(l) >= 0) + .orElse(true) && + upperBound.map(u -> entry.getKey().compareTo(u) < 0) + .orElse(true)) + .collect(Collectors.toMap(Map.Entry::getKey, + Map.Entry::getValue)); + try (Stream keyStream = new SstFileSetReader(files) + .getKeyStreamWithTombstone(lowerBound.orElse(null), + upperBound.orElse(null))) { + keyStream.forEach( + key -> { + assertNotNull(keysInBoundary.remove(key)); + }); } + assertEquals(0, keysInBoundary.size()); } - } finally { - executorService.shutdown(); } } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index faa5096baf98..ff3a7beee509 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -28,21 +28,9 @@ * Ozone Manager Constants. */ public final class OMConfigKeys { - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE = - "ozone.om.snapshot.sst_dumptool.pool.size"; - public static final int - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT = 1; - public static final String OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB = "ozone.om.snapshot.load.native.lib"; public static final boolean OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT = true; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE = - "ozone.om.snapshot.sst_dumptool.buffer.size"; - public static final String - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT = "8KB"; - /** * Never constructed. 
*/ diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh index 1eeca5c0f3d9..89206b3bdf2f 100755 --- a/hadoop-ozone/dev-support/checks/native.sh +++ b/hadoop-ozone/dev-support/checks/native.sh @@ -19,20 +19,5 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" CHECK=native -zlib_version=$(mvn -N help:evaluate -Dexpression=zlib.version -q -DforceStdout) -if [[ -z "${zlib_version}" ]]; then - echo "ERROR zlib.version not defined in pom.xml" - exit 1 -fi - -bzip2_version=$(mvn -N help:evaluate -Dexpression=bzip2.version -q -DforceStdout) -if [[ -z "${bzip2_version}" ]]; then - echo "ERROR bzip2.version not defined in pom.xml" - exit 1 -fi - -source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \ - -Dbzip2.url="https://github.com/libarchive/bzip2/archive/refs/tags/bzip2-${bzip2_version}.tar.gz" \ - -Dzlib.url="https://github.com/madler/zlib/releases/download/v${zlib_version}/zlib-${zlib_version}.tar.gz" \ - -DexcludedGroups="unhealthy" \ +source "${DIR}/junit.sh" -Pnative -Drocks_tools_native -DexcludedGroups="unhealthy" \ "$@" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java index 5ed2f848aed8..06fbebb2efa2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFsoWithNativeLib.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.snapshot; import org.apache.ozone.test.tag.Native; -import org.apache.ozone.test.tag.Unhealthy; import org.junit.jupiter.api.Timeout; import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; @@ -30,7 +29,6 @@ */ @Native(ROCKS_TOOLS_NATIVE_LIBRARY_NAME) @Timeout(300) -@Unhealthy("HDDS-10149") class TestOmSnapshotFsoWithNativeLib extends TestOmSnapshot { TestOmSnapshotFsoWithNativeLib() throws Exception { super(FILE_SYSTEM_OPTIMIZED, false, false, false); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 2a5da96f63f6..a200a36cb25d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -25,18 +25,16 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRawSSTFileReader; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneConsts; -import 
org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -88,7 +86,6 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.SynchronousQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; @@ -111,6 +108,8 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.checkSnapshotActive; @@ -182,9 +181,7 @@ public class SnapshotDiffManager implements AutoCloseable { private final boolean diffDisableNativeLibs; - private final Optional sstDumpTool; - - private Optional sstDumpToolExecService; + private final boolean isNativeLibsLoaded; private final BiFunction generateSnapDiffJobKey = @@ -259,7 +256,7 @@ public SnapshotDiffManager(ManagedRocksDB db, createEmptySnapDiffDir(path); this.sstBackupDirForSnapDiffJobs = path.toString(); - this.sstDumpTool = initSSTDumpTool(ozoneManager.getConfiguration()); + this.isNativeLibsLoaded = initNativeLibraryForEfficientDiff(ozoneManager.getConfiguration()); // Ideally, loadJobsOnStartUp should run only on OM node, since SnapDiff // is not HA currently and running this on all the nodes would be @@ -282,35 +279,16 @@ public PersistentMap getSnapDiffJobTable() { return snapDiffJobTable; } - private Optional initSSTDumpTool( - final OzoneConfiguration conf) { - if (conf.getBoolean(OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, - OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { + private boolean initNativeLibraryForEfficientDiff(final OzoneConfiguration conf) { + if (conf.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) { try { - int threadPoolSize = conf.getInt( - OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, - OMConfigKeys - .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT); - int bufferSize = (int) conf.getStorageSize( - OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE, - OMConfigKeys - .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, - StorageUnit.BYTES); - this.sstDumpToolExecService = Optional.of(new ThreadPoolExecutor(0, - threadPoolSize, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder() - .setNameFormat(ozoneManager.getThreadNamePrefix() + - "snapshot-diff-manager-sst-dump-tool-TID-%d") - .build(), - new ThreadPoolExecutor.DiscardPolicy())); - return Optional.of(new ManagedSSTDumpTool(sstDumpToolExecService.get(), - bufferSize)); + return ManagedRawSSTFileReader.loadLibrary(); } catch (NativeLibraryNotLoadedException e) { - this.sstDumpToolExecService.ifPresent(exec -> - closeExecutorService(exec, "SstDumpToolExecutor")); + LOG.error("Native Library for raw 
sst file reading loading failed.", e); + return false; } } - return Optional.empty(); + return false; } /** @@ -1052,12 +1030,12 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( // Workaround to handle deletes if native rocksDb tool for reading // tombstone is not loaded. // TODO: [SNAPSHOT] Update Rocksdb SSTFileIterator to read tombstone - if (skipNativeDiff || !sstDumpTool.isPresent()) { + if (skipNativeDiff || !isNativeLibsLoaded) { deltaFiles.addAll(getSSTFileListForSnapshot(fromSnapshot, tablesToLookUp)); } addToObjectIdMap(fsTable, tsTable, deltaFiles, - !skipNativeDiff && sstDumpTool.isPresent(), + !skipNativeDiff && isNativeLibsLoaded, oldObjIdToKeyMap, newObjIdToKeyMap, objectIdToIsDirMap, oldParentIds, newParentIds, tablePrefixes); } @@ -1088,12 +1066,9 @@ void addToObjectIdMap(Table fsTable, upperBoundCharArray[upperBoundCharArray.length - 1] += 1; sstFileReaderUpperBound = String.valueOf(upperBoundCharArray); } - try (Stream keysToCheck = - nativeRocksToolsLoaded && sstDumpTool.isPresent() - ? sstFileReader.getKeyStreamWithTombstone(sstDumpTool.get(), - sstFileReaderLowerBound, sstFileReaderUpperBound) - : sstFileReader.getKeyStream(sstFileReaderLowerBound, - sstFileReaderUpperBound)) { + try (Stream keysToCheck = nativeRocksToolsLoaded ? + sstFileReader.getKeyStreamWithTombstone(sstFileReaderLowerBound, sstFileReaderUpperBound) + : sstFileReader.getKeyStream(sstFileReaderLowerBound, sstFileReaderUpperBound)) { keysToCheck.forEach(key -> { try { final WithParentObjectId fromObjectId = fsTable.get(key); @@ -1674,8 +1649,6 @@ public void close() { if (snapDiffExecutor != null) { closeExecutorService(snapDiffExecutor, "SnapDiffExecutor"); } - this.sstDumpToolExecService.ifPresent(exec -> - closeExecutorService(exec, "SstDumpToolExecutor")); } private void closeExecutorService(ExecutorService executorService, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 3f9a1b3ae5bf..543212666e4c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -21,12 +21,10 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -35,7 +33,6 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; -import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry; @@ -67,7 +64,6 @@ import org.apache.ozone.rocksdiff.DifferSnapshotInfo; import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; import 
org.apache.ozone.rocksdiff.RocksDiffUtils; -import org.apache.ozone.test.tag.Unhealthy; import org.apache.ratis.util.ExitUtils; import org.apache.ratis.util.TimeDuration; import jakarta.annotation.Nonnull; @@ -133,10 +129,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; import static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_JOB_TABLE_NAME; import static org.apache.hadoop.ozone.om.OmSnapshotManager.SNAP_DIFF_REPORT_TABLE_NAME; @@ -339,15 +331,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .getInt(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE, OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_DIFF_THREAD_POOL_SIZE_DEFAULT); - when(configuration - .getInt(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT)) - .thenReturn(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE_DEFAULT); - when(configuration - .getStorageSize(OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE, - OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, - StorageUnit.BYTES)) - .thenReturn(FileUtils.ONE_KB_BI.doubleValue()); when(configuration.getBoolean(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB, OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT)) .thenReturn(OZONE_OM_SNAPSHOT_LOAD_NATIVE_LIB_DEFAULT); @@ -379,6 +362,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); omSnapshotManager = mock(OmSnapshotManager.class); + when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.isSnapshotStatus(any(), any())).thenReturn(true); SnapshotCache snapshotCache = new SnapshotCache(mockCacheLoader(), 10); @@ -391,6 +375,7 @@ public void init() throws RocksDBException, IOException, ExecutionException { when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); snapshotDiffManager = new SnapshotDiffManager(db, differ, ozoneManager, snapDiffJobTable, snapDiffReportTable, columnFamilyOptions, codecRegistry); + when(omSnapshotManager.getDiffCleanupServiceInterval()).thenReturn(0L); } private CacheLoader mockCacheLoader() { @@ -667,15 +652,11 @@ public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, try (MockedConstruction mockedSSTFileReader = mockConstruction(SstFileSetReader.class, (mock, context) -> { - when(mock.getKeyStreamWithTombstone(any(), any(), any())) + when(mock.getKeyStreamWithTombstone(any(), any())) .thenReturn(keysIncludingTombstones.stream()); when(mock.getKeyStream(any(), any())) .thenReturn(keysExcludingTombstones.stream()); }); - MockedConstruction mockedSSTDumpTool = - mockConstruction(ManagedSSTDumpTool.class, - (mock, context) -> { - }) ) { Map toSnapshotTableMap = 
IntStream.concat(IntStream.range(0, 25), IntStream.range(50, 100)) @@ -1579,7 +1560,6 @@ public void testGetSnapshotDiffReportHappyCase() throws Exception { * Tests that only QUEUED jobs are submitted to the executor and rest are * short-circuited based on previous one. */ - @Unhealthy @Test public void testGetSnapshotDiffReportJob() throws Exception { for (int i = 0; i < jobStatuses.size(); i++) { diff --git a/pom.xml b/pom.xml index 5dce3f2f3573..84fe041db915 100644 --- a/pom.xml +++ b/pom.xml @@ -297,11 +297,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.9.7 1.14.0 2.4.0 - 1.0.8 - 1.2.13 - 1.9.3 - 1.1.8 - 1.4.9 1.0.1 5.3.27 @@ -309,9 +304,10 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 5.1.0 + 1.2.1 + 3.9.6 - @@ -2091,6 +2087,18 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + org.codehaus.mojo + properties-maven-plugin + ${properties.maven.plugin.version} + + + org.apache.maven + maven-core + ${maven.core.version} + + + From 284846f2c8523922d1177575c0d2830a56fb2755 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Thu, 22 Feb 2024 16:31:52 -0800 Subject: [PATCH 046/108] HDDS-10363. HDDS-9388 broke encryption. (#6219) --- .../hdds/utils/ClusterContainersUtil.java | 147 ++++++++++++++++++ .../client/rpc/TestOzoneAtRestEncryption.java | 69 ++++++++ .../rpc/TestOzoneRpcClientAbstract.java | 62 ++------ .../om/request/file/OMFileCreateRequest.java | 5 +- .../file/OMFileCreateRequestWithFSO.java | 1 + .../om/request/key/OMKeyCreateRequest.java | 2 + .../key/OMKeyCreateRequestWithFSO.java | 2 + .../ozone/om/request/key/OMKeyRequest.java | 19 ++- .../request/file/TestOMFileCreateRequest.java | 45 ++++++ 9 files changed, 294 insertions(+), 58 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java new file mode 100644 index 000000000000..e7e0337b5f9f --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/ClusterContainersUtil.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdds.utils; + +import com.google.common.base.Preconditions; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.ozone.HddsDatanodeService; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.container.common.helpers.BlockData; +import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; +import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.Charset; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +/** + * Utility method to manipulate/inspect container data on disk in a mini cluster. + */ +public final class ClusterContainersUtil { + private ClusterContainersUtil() { + } + + /** + * + * + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @return the location of the chunk file. + * @throws IOException + */ + public static File getChunksLocationPath(MiniOzoneCluster cluster, Container container, OzoneKey key) + throws IOException { + Preconditions.checkArgument(key instanceof OzoneKeyDetails); + long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) + .getContainerID(); + long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) + .getLocalID(); + // From the containerData, get the block iterator for all the blocks in + // the container. + KeyValueContainerData containerData = + (KeyValueContainerData) container.getContainerData(); + try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); + BlockIterator keyValueBlockIterator = + db.getStore().getBlockIterator(containerID)) { + // Find the block corresponding to the key we put. We use the localID of + // the BlockData to identify out key. + BlockData blockData = null; + while (keyValueBlockIterator.hasNext()) { + blockData = keyValueBlockIterator.nextBlock(); + if (blockData.getBlockID().getLocalID() == localID) { + break; + } + } + assertNotNull(blockData, "Block not found"); + + // Get the location of the chunk file + String containreBaseDir = + container.getContainerData().getVolume().getHddsRootDir().getPath(); + File chunksLocationPath = KeyValueContainerLocationUtil + .getChunksLocationPath(containreBaseDir, cluster.getClusterId(), containerID); + return chunksLocationPath; + } + } + + /** + * Corrupt the chunk backing the key in a mini cluster. + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. 
+ * @throws IOException + */ + public static void corruptData(MiniOzoneCluster cluster, Container container, OzoneKey key) + throws IOException { + File chunksLocationPath = getChunksLocationPath(cluster, container, key); + byte[] corruptData = "corrupted data".getBytes(UTF_8); + // Corrupt the contents of chunk files + for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { + FileUtils.writeByteArrayToFile(file, corruptData); + } + } + + /** + * Inspect and verify if chunk backing the key in a mini cluster is the same as the string. + * @param cluster a mini ozone cluster object. + * @param container a container object. + * @param key an OzoneKey object. + * @return true if the same; false if does not match. + * @throws IOException + */ + public static boolean verifyOnDiskData(MiniOzoneCluster cluster, Container container, OzoneKey key, String data) + throws IOException { + File chunksLocationPath = getChunksLocationPath(cluster, container, key); + for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { + String chunkOnDisk = FileUtils.readFileToString(file, Charset.defaultCharset()); + if (!data.equals(chunkOnDisk)) { + return false; + } + } + return true; + } + + /** + * Return the first container object in a mini cluster specified by its ID. + * @param cluster a mini ozone cluster object. + * @param containerID a long variable representing cluater ID. + * @return the container object; null if not found. + */ + public static Container getContainerByID(MiniOzoneCluster cluster, long containerID) { + // Get the container by traversing the datanodes. Atleast one of the + // datanode must have this container. + Container container = null; + for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { + container = hddsDatanode.getDatanodeStateMachine().getContainer() + .getContainerSet().getContainer(containerID); + if (container != null) { + break; + } + } + return container; + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 0b0149b4d9c0..29cf1bc5e117 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -40,6 +40,9 @@ import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -66,8 +69,10 @@ import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -81,6 +86,14 @@ 
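The helpers above are intended for mini-cluster tests; a hedged sketch of how they fit together (cluster, bucket, keyName and value are assumed to be set up by the test, with the utility and assertion methods statically imported as in the tests below):

    OzoneKey key = bucket.getKey(keyName);
    long containerID = ((OzoneKeyDetails) key)
        .getOzoneKeyLocations().get(0).getContainerID();

    // At least one datanode in the mini cluster hosts this container.
    Container container = getContainerByID(cluster, containerID);
    assertNotNull(container, "Container not found");

    // For an encrypted bucket the on-disk chunk must not equal the plaintext value.
    assertFalse(verifyOnDiskData(cluster, container, key, value));

    // Or overwrite the chunk to exercise checksum-mismatch handling on read.
    corruptData(cluster, container, key);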
import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.getContainerByID; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.verifyOnDiskData; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.ozone.test.GenericTestUtils.getTestStartTime; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -90,6 +103,7 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -173,6 +187,11 @@ static void init() throws Exception { // create test key createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf); eTagProvider = MessageDigest.getInstance(OzoneConsts.MD5_HASH); + + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + conf.setInt(OZONE_REPLICATION, 1); } @AfterAll @@ -210,6 +229,7 @@ void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { createAndVerifyKeyData(bucket); createAndVerifyStreamKeyData(bucket); + createAndVerifyFileSystemData(bucket); } @ParameterizedTest @@ -258,6 +278,27 @@ static void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { verifyKeyData(bucket, keyName, value, testStartTime); } + static void createAndVerifyFileSystemData( + OzoneBucket bucket) throws Exception { + // OBS does not support file system semantics. + if (bucket.getBucketLayout() == BucketLayout.OBJECT_STORE) { + return; + } + Instant testStartTime = getTestStartTime(); + String keyName = UUID.randomUUID().toString(); + String value = "sample value"; + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + final Path file = new Path(dir, keyName); + try (FileSystem fs = FileSystem.get(conf)) { + try (FSDataOutputStream out = fs.create(file, true)) { + out.write(value.getBytes(StandardCharsets.UTF_8)); + } + } + verifyKeyData(bucket, keyName, value, testStartTime); + } + static void verifyKeyData(OzoneBucket bucket, String keyName, String value, Instant testStartTime) throws Exception { // Verify content. 
@@ -284,6 +325,13 @@ static void verifyKeyData(OzoneBucket bucket, String keyName, String value, assertEquals(value, new String(fileContent, StandardCharsets.UTF_8)); assertFalse(key.getCreationTime().isBefore(testStartTime)); assertFalse(key.getModificationTime().isBefore(testStartTime)); + + long containerID = key.getOzoneKeyLocations().get(0) + .getContainerID(); + Container container = getContainerByID(cluster, containerID); + // the data stored on disk should not be the same as the input. + assertFalse(verifyOnDiskData(cluster, container, key, value), + "On disk block is written in clear text!"); } private OzoneBucket createVolumeAndBucket(String volumeName, @@ -443,6 +491,18 @@ void mpuOnePart(BucketLayout bucketLayout) throws Exception { createVolumeAndBucket(volumeName, bucketName, bucketLayout), 1); } + @ParameterizedTest + @EnumSource + void mpuOnePartInvalidUploadID(BucketLayout bucketLayout) throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + OMException e = assertThrows(OMException.class, () -> + testMultipartUploadWithEncryption( + createVolumeAndBucket(volumeName, bucketName, bucketLayout), 1, false, true) + ); + assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, e.getResult()); + } + @ParameterizedTest @EnumSource void mpuTwoParts(BucketLayout bucketLayout) throws Exception { @@ -520,12 +580,21 @@ private void testMultipartUploadWithEncryption(OzoneBucket bucket, private void testMultipartUploadWithEncryption(OzoneBucket bucket, int numParts, boolean isStream) throws Exception { + testMultipartUploadWithEncryption(bucket, numParts, isStream, false); + } + + private void testMultipartUploadWithEncryption(OzoneBucket bucket, + int numParts, boolean isStream, boolean invalidUploadID) throws Exception { String keyName = "mpu_test_key_" + numParts; // Initiate multipart upload String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationConfig.fromTypeAndFactor(RATIS, ONE)); + if (invalidUploadID) { + uploadID += "random1234"; + } + // Upload Parts Map partsMap = new TreeMap<>(); List partsData = new ArrayList<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index f2efe84b9c2c..da41561f8ce2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import java.io.File; import java.io.IOException; import java.io.InputStream; import java.security.MessageDigest; @@ -96,7 +95,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmFailoverProxyUtil; @@ -127,7 +125,7 @@ import org.apache.ozone.test.tag.Flaky; import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.io.FileUtils; + import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; 
import static org.apache.hadoop.hdds.StringUtils.string2Bytes; @@ -145,6 +143,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.ETAG; import static org.apache.hadoop.ozone.OzoneConsts.GB; import static org.apache.hadoop.ozone.OzoneConsts.MD5_HASH; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.corruptData; +import static org.apache.hadoop.hdds.utils.ClusterContainersUtil.getContainerByID; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_RENAME; @@ -1722,16 +1722,9 @@ private void createAndCorruptKey(String volumeName, String bucketName, // Get the container by traversing the datanodes. Atleast one of the // datanode must have this container. - Container container = null; - for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) { - container = hddsDatanode.getDatanodeStateMachine().getContainer() - .getContainerSet().getContainer(containerID); - if (container != null) { - break; - } - } + Container container = getContainerByID(cluster, containerID); assertNotNull(container, "Container not found"); - corruptData(container, key); + corruptData(cluster, container, key); } @@ -1894,7 +1887,7 @@ public void testReadKeyWithCorruptedData() throws IOException { } } assertNotNull(container, "Container not found"); - corruptData(container, key); + corruptData(cluster, container, key); // Try reading the key. Since the chunk file is corrupted, it should // throw a checksum mismatch exception. @@ -2049,7 +2042,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { } } assertThat(containerList).withFailMessage("Container not found").isNotEmpty(); - corruptData(containerList.get(0), key); + corruptData(cluster, containerList.get(0), key); // Try reading the key. Read will fail on the first node and will eventually // failover to next replica try (OzoneInputStream is = bucket.readKey(keyName)) { @@ -2057,7 +2050,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { is.read(b); assertArrayEquals(b, data); } - corruptData(containerList.get(1), key); + corruptData(cluster, containerList.get(1), key); // Try reading the key. Read will fail on the first node and will eventually // failover to next replica try (OzoneInputStream is = bucket.readKey(keyName)) { @@ -2065,7 +2058,7 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { is.read(b); assertArrayEquals(b, data); } - corruptData(containerList.get(2), key); + corruptData(cluster, containerList.get(2), key); // Try reading the key. Read will fail here as all the replicas are corrupt IOException ioException = assertThrows(IOException.class, () -> { @@ -2077,43 +2070,6 @@ void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { assertThat(ioException).hasMessageContaining("Checksum mismatch"); } - private void corruptData(Container container, OzoneKey key) - throws IOException { - long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getContainerID(); - long localID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0) - .getLocalID(); - // From the containerData, get the block iterator for all the blocks in - // the container. 
- KeyValueContainerData containerData = - (KeyValueContainerData) container.getContainerData(); - try (DBHandle db = BlockUtils.getDB(containerData, cluster.getConf()); - BlockIterator keyValueBlockIterator = - db.getStore().getBlockIterator(containerID)) { - // Find the block corresponding to the key we put. We use the localID of - // the BlockData to identify out key. - BlockData blockData = null; - while (keyValueBlockIterator.hasNext()) { - blockData = keyValueBlockIterator.nextBlock(); - if (blockData.getBlockID().getLocalID() == localID) { - break; - } - } - assertNotNull(blockData, "Block not found"); - - // Get the location of the chunk file - String containreBaseDir = - container.getContainerData().getVolume().getHddsRootDir().getPath(); - File chunksLocationPath = KeyValueContainerLocationUtil - .getChunksLocationPath(containreBaseDir, cluster.getClusterId(), containerID); - byte[] corruptData = "corrupted data".getBytes(UTF_8); - // Corrupt the contents of chunk files - for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { - FileUtils.writeByteArrayToFile(file, corruptData); - } - } - } - @Test public void testDeleteKey() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index d4bc91dbfdf6..9b9fb4e7cc5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -154,10 +154,10 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { .map(info -> info.getProtobuf(getOmRequest().getVersion())) .collect(Collectors.toList())); + generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); + KeyArgs resolvedArgs = resolveBucketAndCheckKeyAcls(newKeyArgs.build(), ozoneManager, IAccessAuthorizer.ACLType.CREATE); - - generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager); CreateFileRequest.Builder newCreateFileRequest = createFileRequest.toBuilder().setKeyArgs(resolvedArgs) .setClientID(UniqueId.next()); @@ -255,6 +255,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getPrefixManager(), omBucketInfo, pathInfo, trxnLogIndex, ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(omBucketInfo, keyArgs); long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); long clientID = createFileRequest.getClientID(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 393be170a5b4..6910061c771c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -172,6 +172,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn bucketInfo, pathInfoFSO, trxnLogIndex, pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); long openVersion = 
omFileInfo.getLatestVersionLocations().getVersion(); long clientID = createFileRequest.getClientID(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 48805d6e4e5a..e9a9f007197a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -281,6 +281,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getObjectIdFromTxId(trxnLogIndex), ozoneManager.isRatisEnabled(), replicationConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); + long openVersion = omKeyInfo.getLatestVersionLocations().getVersion(); long clientID = createKeyRequest.getClientID(); String dbOpenKeyName = omMetadataManager.getOpenKey(volumeName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 0dec9fa459f6..6fe8c1208586 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -157,6 +157,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn pathInfoFSO.getLeafNodeObjectId(), ozoneManager.isRatisEnabled(), repConfig); + validateEncryptionKeyInfo(bucketInfo, keyArgs); + long openVersion = omFileInfo.getLatestVersionLocations().getVersion(); long clientID = createKeyRequest.getClientID(); String dbOpenFileName = omMetadataManager diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 8a0cfd78416e..100c2d842f22 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -96,6 +96,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes .VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; @@ -591,9 +592,14 @@ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs, omMetadataManager.getOpenKeyTable(getBucketLayout()) .get(dbMultipartOpenKey); - if (omKeyInfo != null && omKeyInfo.getFileEncryptionInfo() != null) { - newKeyArgs.setFileEncryptionInfo( - OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo())); + if (omKeyInfo != null) { + if (omKeyInfo.getFileEncryptionInfo() != null) { + newKeyArgs.setFileEncryptionInfo( + OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo())); + } + } else { + LOG.warn("omKeyInfo not found. Key: " + dbMultipartOpenKey + + ". 
The upload id " + keyArgs.getMultipartUploadID() + " may be invalid."); } } finally { if (acquireLock) { @@ -1051,4 +1057,11 @@ protected void filterOutBlocksStillInUse(OmKeyInfo referenceKey, LOG.debug("After block filtering, keysToBeFiltered = {}", keysToBeFiltered); } + + protected void validateEncryptionKeyInfo(OmBucketInfo bucketInfo, KeyArgs keyArgs) throws OMException { + if (bucketInfo.getEncryptionKeyInfo() != null && !keyArgs.hasFileEncryptionInfo()) { + throw new OMException("Attempting to create unencrypted file " + + keyArgs.getKeyName() + " in encrypted bucket " + keyArgs.getBucketName(), INVALID_REQUEST); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index 74b067a76a45..fbf9c0bb8528 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -24,9 +24,13 @@ import java.util.UUID; import java.util.stream.Collectors; +import org.apache.hadoop.crypto.CipherSuite; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; @@ -58,6 +62,9 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests OMFileCreateRequest. 
@@ -202,6 +209,44 @@ public void testValidateAndUpdateCacheWithNamespaceQuotaExceeded() OzoneManagerProtocolProtos.Status.QUOTA_EXCEEDED); } + @Test + public void testValidateAndUpdateEncryption() throws Exception { + KeyProviderCryptoExtension.EncryptedKeyVersion eKV = + KeyProviderCryptoExtension.EncryptedKeyVersion.createForDecryption( + "key1", "v1", new byte[0], new byte[0]); + KeyProviderCryptoExtension mockKeyProvider = mock(KeyProviderCryptoExtension.class); + when(mockKeyProvider.generateEncryptedKey(any())).thenReturn(eKV); + + when(ozoneManager.getKmsProvider()).thenReturn(mockKeyProvider); + keyName = "test/" + keyName; + OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, + HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, + false, true); + + // add volume and create bucket with bucket encryption key + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, omMetadataManager, + OmBucketInfo.newBuilder().setVolumeName(volumeName) + .setBucketName(bucketName) + .setBucketLayout(getBucketLayout()) + .setBucketEncryptionKey( + new BucketEncryptionKeyInfo.Builder() + .setKeyName("key1") + .setSuite(mock(CipherSuite.class)) + .setVersion(mock(CryptoProtocolVersion.class)) + .build())); + + OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); + OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); + + OMFileCreateRequest omFileCreateRequestPreExecuted = getOMFileCreateRequest(modifiedOmRequest); + OMClientResponse omClientResponse = omFileCreateRequestPreExecuted + .validateAndUpdateCache(ozoneManager, 100L); + assertEquals( + OzoneManagerProtocolProtos.Status.OK, omClientResponse.getOMResponse().getStatus()); + assertTrue(omClientResponse.getOMResponse().getCreateFileResponse().getKeyInfo().hasFileEncryptionInfo()); + when(ozoneManager.getKmsProvider()).thenReturn(null); + } + @Test public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, From 83cb2b7d1d294158f55b106a2d61e2c878d74d72 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 23 Feb 2024 09:48:42 +0100 Subject: [PATCH 047/108] HDDS-10320. 
Introduce factory to configure MiniOzoneCluster's datanodes (#6246) --- .../hadoop/ozone/MiniOzoneChaosCluster.java | 13 -- .../ozone/TestMiniChaosOzoneCluster.java | 4 +- .../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 5 +- .../hdds/upgrade/TestScmHAFinalization.java | 5 +- .../apache/hadoop/ozone/MiniOzoneCluster.java | 48 ++--- .../hadoop/ozone/MiniOzoneClusterImpl.java | 67 +------ .../hadoop/ozone/TestMiniOzoneCluster.java | 6 +- .../hadoop/ozone/UniformDatanodesFactory.java | 167 ++++++++++++++++++ ...estDatanodeHddsVolumeFailureDetection.java | 1 - ...stDatanodeHddsVolumeFailureToleration.java | 5 +- 10 files changed, 198 insertions(+), 123 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 76da4a5a8cab..143ec59ddece 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -281,19 +281,6 @@ protected void initializeConfiguration() throws IOException { OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, 100); } - /** - * Sets the number of data volumes per datanode. - * - * @param val number of volumes per datanode. - * - * @return MiniOzoneCluster.Builder - */ - @Override - public Builder setNumDataVolumes(int val) { - numDataVolumes = val; - return this; - } - @Override public MiniOzoneChaosCluster build() throws IOException { DefaultMetricsSystem.setMiniClusterMode(true); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java index 6894aed25ab6..5be5c3ef0c5b 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java @@ -130,7 +130,9 @@ public static void init() throws Exception { .setOMServiceID(omServiceId) .setNumStorageContainerManagers(numStorageContainerManagerss) .setSCMServiceID(scmServiceId) - .setNumDataVolumes(numDataVolumes); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(numDataVolumes) + .build()); failureClasses.forEach(chaosBuilder::addFailures); cluster = chaosBuilder.build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 199b4b63ff74..97855f3775fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -76,6 +76,7 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneClusterProvider; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.client.ObjectStore; import 
org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -170,7 +171,9 @@ public static void initClass() { .setNumDatanodes(NUM_DATA_NODES) .setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) - .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) + .build()); // Setting the provider to a max of 100 clusters. Some of the tests here // use multiple clusters, so its hard to know exactly how many will be diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index aa9f561aa02b..a8af377e9846 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizationContext; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.upgrade.DefaultUpgradeFinalizationExecutor; import org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizationExecutor; @@ -98,7 +99,9 @@ public void init(OzoneConfiguration conf, .setSCMConfigurator(configurator) .setNumOfOzoneManagers(1) .setNumDatanodes(NUM_DATANODES) - .setDnLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) + .build()); this.cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); scmClient = cluster.getStorageContainerLocationClient(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index c8e32a7917d2..33e15bf98607 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.List; -import java.util.Optional; import java.util.UUID; import java.util.concurrent.TimeoutException; @@ -38,6 +37,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.util.ExitUtils; +import org.apache.ratis.util.function.CheckedFunction; /** * Interface used for MiniOzoneClusters. 
@@ -287,16 +287,13 @@ abstract class Builder { protected String scmId = UUID.randomUUID().toString(); protected String omId = UUID.randomUUID().toString(); - protected Optional datanodeReservedSpace = Optional.empty(); protected boolean includeRecon = false; - protected Optional dnLayoutVersion = Optional.empty(); - protected int numOfDatanodes = 3; - protected int numDataVolumes = 1; protected boolean startDataNodes = true; protected CertificateClient certClient; protected SecretKeyClient secretKeyClient; + protected DatanodeFactory dnFactory = UniformDatanodesFactory.newBuilder().build(); protected Builder(OzoneConfiguration conf) { this.conf = conf; @@ -366,33 +363,8 @@ public Builder setNumDatanodes(int val) { return this; } - /** - * Sets the number of data volumes per datanode. - * - * @param val number of volumes per datanode. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setNumDataVolumes(int val) { - numDataVolumes = val; - return this; - } - - /** - * Sets the reserved space - * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys} - * HDDS_DATANODE_DIR_DU_RESERVED - * for each volume in each datanode. - * @param reservedSpace String that contains the numeric size value and - * ends with a - * {@link org.apache.hadoop.hdds.conf.StorageUnit} - * suffix. For example, "50GB". - * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo - * - * @return {@link MiniOzoneCluster} Builder - */ - public Builder setDatanodeReservedSpace(String reservedSpace) { - datanodeReservedSpace = Optional.of(reservedSpace); + public Builder setDatanodeFactory(DatanodeFactory factory) { + this.dnFactory = factory; return this; } @@ -431,11 +403,6 @@ public Builder setSCMServiceId(String serviceId) { return this; } - public Builder setDnLayoutVersion(int layoutVersion) { - dnLayoutVersion = Optional.of(layoutVersion); - return this; - } - /** * Constructs and returns MiniOzoneCluster. * @@ -443,4 +410,11 @@ public Builder setDnLayoutVersion(int layoutVersion) { */ public abstract MiniOzoneCluster build() throws IOException; } + + /** + * Factory to customize configuration of each datanode. 
+ */ + interface DatanodeFactory extends CheckedFunction { + // marker + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 0e71063600da..859ce4740348 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -27,18 +27,15 @@ import java.util.Collections; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.Iterator; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -68,10 +65,8 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.common.Storage.StorageState; -import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.DatanodeStoreCache; -import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; @@ -88,11 +83,6 @@ import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_TASK_SAFEMODE_WAIT_THRESHOLD; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; @@ -744,55 +734,15 @@ protected List createHddsDatanodes() throws IOException { List hddsDatanodes = new ArrayList<>(); for (int i = 0; i < numOfDatanodes; i++) { - OzoneConfiguration dnConf = new OzoneConfiguration(conf); - configureDatanodePorts(dnConf); - String datanodeBaseDir = path + "/datanode-" + i; - Path metaDir = Paths.get(datanodeBaseDir, "meta"); - List dataDirs = new ArrayList<>(); - List reservedSpaceList = new ArrayList<>(); - for (int j = 0; j < numDataVolumes; j++) { - Path dir = Paths.get(datanodeBaseDir, "data-" + j, "containers"); - Files.createDirectories(dir); - dataDirs.add(dir.toString()); - 
datanodeReservedSpace.ifPresent( - s -> reservedSpaceList.add(dir + ":" + s)); - } - String reservedSpaceString = String.join(",", reservedSpaceList); - String listOfDirs = String.join(",", dataDirs); - Path ratisDir = Paths.get(datanodeBaseDir, "data", "ratis"); - Path workDir = Paths.get(datanodeBaseDir, "data", "replication", - "work"); - Files.createDirectories(metaDir); - Files.createDirectories(ratisDir); - Files.createDirectories(workDir); - dnConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - dnConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, listOfDirs); - dnConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, listOfDirs); - dnConf.set(ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED, - reservedSpaceString); - dnConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, - ratisDir.toString()); + OzoneConfiguration dnConf = dnFactory.apply(conf); HddsDatanodeService datanode = new HddsDatanodeService(NO_ARGS); datanode.setConfiguration(dnConf); hddsDatanodes.add(datanode); } - if (dnLayoutVersion.isPresent()) { - configureLayoutVersionInDatanodes(hddsDatanodes, dnLayoutVersion.get()); - } return hddsDatanodes; } - private void configureLayoutVersionInDatanodes( - List dns, int layoutVersion) throws IOException { - for (HddsDatanodeService dn : dns) { - DatanodeLayoutStorage layoutStorage; - layoutStorage = new DatanodeLayoutStorage(dn.getConf(), - UUID.randomUUID().toString(), layoutVersion); - layoutStorage.initialize(); - } - } - protected void configureSCM() { conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, localhostWithFreePort()); @@ -814,21 +764,6 @@ private void configureOM() { conf.setInt(OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, getFreePort()); } - protected void configureDatanodePorts(ConfigurationTarget conf) { - conf.set(ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY, - anyHostWithFreePort()); - conf.set(HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY, - anyHostWithFreePort()); - conf.set(HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY, - anyHostWithFreePort()); - conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); - conf.setFromObject(new ReplicationConfig().setPort(getFreePort())); - } - protected void configureRecon() { ConfigurationProvider.resetConfiguration(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 74d52c4a9457..020f8623c4ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -258,8 +258,10 @@ public void testMultipleDataDirs() throws Exception { String reservedSpace = "1B"; cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1) - .setNumDataVolumes(3) - .setDatanodeReservedSpace(reservedSpace) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(3) + .setReservedSpace(reservedSpace) + .build()) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java 
new file mode 100644 index 000000000000..6cc6bcb8e95d --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.conf.ConfigurationTarget; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; +import org.apache.hadoop.ozone.container.replication.ReplicationServer; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; +import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; + +/** + * Creates datanodes with similar configuration (same number of volumes, same layout version, etc.). 
+ */ +public class UniformDatanodesFactory implements MiniOzoneCluster.DatanodeFactory { + + private final AtomicInteger nodesCreated = new AtomicInteger(); + + private final int numDataVolumes; + private final String reservedSpace; + private final Integer layoutVersion; + + protected UniformDatanodesFactory(Builder builder) { + numDataVolumes = builder.numDataVolumes; + layoutVersion = builder.layoutVersion; + reservedSpace = builder.reservedSpace; + } + + @Override + public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException { + final int i = nodesCreated.incrementAndGet(); + final OzoneConfiguration dnConf = new OzoneConfiguration(conf); + + configureDatanodePorts(dnConf); + + Path baseDir = Paths.get(Objects.requireNonNull(conf.get(OZONE_METADATA_DIRS)), "datanode-" + i); + + Path metaDir = baseDir.resolve("meta"); + Files.createDirectories(metaDir); + dnConf.set(OZONE_METADATA_DIRS, metaDir.toString()); + + List dataDirs = new ArrayList<>(); + List reservedSpaceList = new ArrayList<>(); + for (int j = 0; j < numDataVolumes; j++) { + Path dir = baseDir.resolve("data-" + j); + Files.createDirectories(dir); + dataDirs.add(dir.toString()); + if (reservedSpace != null) { + reservedSpaceList.add(dir + ":" + reservedSpace); + } + } + String reservedSpaceString = String.join(",", reservedSpaceList); + String listOfDirs = String.join(",", dataDirs); + dnConf.set(DFS_DATANODE_DATA_DIR_KEY, listOfDirs); + dnConf.set(HDDS_DATANODE_DIR_KEY, listOfDirs); + dnConf.set(HDDS_DATANODE_DIR_DU_RESERVED, reservedSpaceString); + + Path ratisDir = baseDir.resolve("ratis"); + Files.createDirectories(ratisDir); + dnConf.set(DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); + + if (layoutVersion != null) { + DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage( + dnConf, UUID.randomUUID().toString(), layoutVersion); + layoutStorage.initialize(); + } + + return dnConf; + } + + private void configureDatanodePorts(ConfigurationTarget conf) { + conf.set(HDDS_REST_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(HDDS_DATANODE_HTTP_ADDRESS_KEY, anyHostWithFreePort()); + conf.set(HDDS_DATANODE_CLIENT_ADDRESS_KEY, anyHostWithFreePort()); + conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); + conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); + conf.setFromObject(new ReplicationServer.ReplicationConfig().setPort(getFreePort())); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder for UniformDatanodesFactory. + */ + public static class Builder { + + private int numDataVolumes = 1; + private String reservedSpace; + private Integer layoutVersion; + + /** + * Sets the number of data volumes per datanode. + */ + public Builder setNumDataVolumes(int n) { + numDataVolumes = n; + return this; + } + + /** + * Sets the reserved space + * {@link org.apache.hadoop.hdds.scm.ScmConfigKeys#HDDS_DATANODE_DIR_DU_RESERVED} + * for each volume in each datanode. + * @param reservedSpace String that contains the numeric size value and ends with a + * {@link org.apache.hadoop.hdds.conf.StorageUnit} suffix. For example, "50GB". 
+ * @see org.apache.hadoop.ozone.container.common.volume.VolumeInfo + */ + public Builder setReservedSpace(String reservedSpace) { + this.reservedSpace = reservedSpace; + return this; + } + + public Builder setLayoutVersion(int layoutVersion) { + this.layoutVersion = layoutVersion; + return this; + } + + public UniformDatanodesFactory build() { + return new UniformDatanodesFactory(this); + } + + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java index 3e22c1db90de..8d77b6cc58b5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java @@ -274,7 +274,6 @@ private static MiniOzoneCluster newCluster(boolean schemaV3) ozoneConfig.setFromObject(dnConf); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConfig) .setNumDatanodes(1) - .setNumDataVolumes(1) .build(); cluster.waitForClusterToBeReady(); cluster.waitForPipelineTobeReady(ReplicationFactor.ONE, 30000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java index 0273deb50e61..98ab87b871de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureToleration.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -90,7 +91,9 @@ public void init() throws Exception { ozoneConfig.setFromObject(dnConf); cluster = MiniOzoneCluster.newBuilder(ozoneConfig) .setNumDatanodes(1) - .setNumDataVolumes(3) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setNumDataVolumes(3) + .build()) .build(); cluster.waitForClusterToBeReady(); datanodes = cluster.getHddsDatanodes(); From 13b635c580592d198b9389e182f123ea24b5e49c Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Fri, 23 Feb 2024 02:25:30 -0800 Subject: [PATCH 048/108] HDDS-10410. Avoid creating ChunkInfo. 
(#6258) --- .../common/impl/ContainerLayoutVersion.java | 20 +++----- .../keyvalue/KeyValueContainerCheck.java | 2 +- .../helpers/KeyValueContainerUtil.java | 51 ++----------------- .../keyvalue/impl/FilePerBlockStrategy.java | 18 +++---- .../keyvalue/impl/FilePerChunkStrategy.java | 3 +- .../keyvalue/TestKeyValueContainerCheck.java | 5 +- .../impl/CommonChunkManagerTestCases.java | 2 +- .../impl/TestFilePerChunkStrategy.java | 4 +- .../hadoop/ozone/debug/ChunkKeyHandler.java | 3 +- 9 files changed, 24 insertions(+), 84 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index 8444b3bda1e8..210c538f274a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -25,12 +25,9 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import com.google.common.collect.ImmutableList; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * Defines layout versions for the Chunks. @@ -39,22 +36,17 @@ public enum ContainerLayoutVersion { FILE_PER_CHUNK(1, "One file per chunk") { @Override - public File getChunkFile(File chunkDir, BlockID blockID, - ChunkInfo info) { - return new File(chunkDir, info.getChunkName()); + public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { + return new File(chunkDir, chunkName); } }, FILE_PER_BLOCK(2, "One file per block") { @Override - public File getChunkFile(File chunkDir, BlockID blockID, - ChunkInfo info) { + public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { return new File(chunkDir, blockID.getLocalID() + ".block"); } }; - private static final Logger LOG = - LoggerFactory.getLogger(ContainerLayoutVersion.class); - private static final ContainerLayoutVersion DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; @@ -118,12 +110,12 @@ public String getDescription() { } public abstract File getChunkFile(File chunkDir, - BlockID blockID, ChunkInfo info); + BlockID blockID, String chunkName); public File getChunkFile(ContainerData containerData, BlockID blockID, - ChunkInfo info) throws StorageContainerException { + String chunkName) throws StorageContainerException { File chunkDir = ContainerUtils.getChunkDir(containerData); - return getChunkFile(chunkDir, blockID, info); + return getChunkFile(chunkDir, blockID, chunkName); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index ab78c6055cdf..f0713469e61f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -345,7 +345,7 @@ private ScanResult scanBlock(BlockData block, 
DataTransferThrottler throttler, File chunkFile; try { chunkFile = layout.getChunkFile(onDiskContainerData, - block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk)); + block.getBlockID(), chunk.getChunkName()); } catch (IOException ex) { return ScanResult.unhealthy( ScanResult.FailureType.MISSING_CHUNK_FILE, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 16847d1157c5..90ee356ab59d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -23,7 +23,6 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.List; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -32,7 +31,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -65,12 +63,6 @@ private KeyValueContainerUtil() { private static final Logger LOG = LoggerFactory.getLogger( KeyValueContainerUtil.class); - /** - * - * @param containerMetaDataPath - * @throws IOException - */ - /** * creates metadata path, chunks path and metadata DB for the specified * container. @@ -411,46 +403,9 @@ private static void initializeUsedBytesAndBlockCount(DatanodeStore store, } public static long getBlockLength(BlockData block) throws IOException { - long blockLen = 0; - List chunkInfoList = block.getChunks(); - - for (ContainerProtos.ChunkInfo chunk : chunkInfoList) { - ChunkInfo info = ChunkInfo.getFromProtoBuf(chunk); - blockLen += info.getLen(); - } - - return blockLen; - } - - /** - * Returns the path where data or chunks live for a given container. - * - * @param kvContainerData - KeyValueContainerData - * @return - Path to the chunks directory - */ - public static Path getDataDirectory(KeyValueContainerData kvContainerData) { - - String chunksPath = kvContainerData.getChunksPath(); - Preconditions.checkNotNull(chunksPath); - - return Paths.get(chunksPath); - } - - /** - * Container metadata directory -- here is where the RocksDB and - * .container file lives. 
- * - * @param kvContainerData - KeyValueContainerData - * @return Path to the metadata directory - */ - public static Path getMetadataDirectory( - KeyValueContainerData kvContainerData) { - - String metadataPath = kvContainerData.getMetadataPath(); - Preconditions.checkNotNull(metadataPath); - - return Paths.get(metadataPath); - + return block.getChunks().stream() + .mapToLong(ContainerProtos.ChunkInfo::getLen) + .sum(); } public static boolean isSameSchemaVersion(String schema, String other) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 040b03c3dce2..9a607ceebed9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -96,7 +96,7 @@ private static void checkLayoutVersion(Container container) { public String streamInit(Container container, BlockID blockID) throws StorageContainerException { checkLayoutVersion(container); - File chunkFile = getChunkFile(container, blockID, null); + final File chunkFile = getChunkFile(container, blockID); return chunkFile.getAbsolutePath(); } @@ -105,7 +105,7 @@ public StateMachine.DataChannel getStreamDataChannel( Container container, BlockID blockID, ContainerMetrics metrics) throws StorageContainerException { checkLayoutVersion(container); - File chunkFile = getChunkFile(container, blockID, null); + final File chunkFile = getChunkFile(container, blockID); return new KeyValueStreamDataChannel(chunkFile, container.getContainerData(), metrics); } @@ -137,7 +137,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - File chunkFile = getChunkFile(container, blockID, info); + final File chunkFile = getChunkFile(container, blockID); long len = info.getLen(); long offset = info.getOffset(); @@ -188,7 +188,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, HddsVolume volume = containerData.getVolume(); - File chunkFile = getChunkFile(container, blockID, info); + final File chunkFile = getChunkFile(container, blockID); final long len = info.getLen(); long offset = info.getOffset(); @@ -213,7 +213,7 @@ public void deleteChunks(Container container, BlockData blockData) @Override public void finishWriteChunks(KeyValueContainer container, BlockData blockData) throws IOException { - File chunkFile = getChunkFile(container, blockData.getBlockID(), null); + final File chunkFile = getChunkFile(container, blockData.getBlockID()); try { files.close(chunkFile); verifyChunkFileExists(chunkFile); @@ -230,7 +230,7 @@ private void deleteChunk(Container container, BlockID blockID, Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - File file = getChunkFile(container, blockID, info); + final File file = getChunkFile(container, blockID); // if the chunk file does not exist, it might have already been deleted. 
// The call might be because of reapply of transactions on datanode @@ -250,10 +250,8 @@ private void deleteChunk(Container container, BlockID blockID, LOG.info("Deleted block file: {}", file); } - private File getChunkFile(Container container, BlockID blockID, - ChunkInfo info) throws StorageContainerException { - return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, - info); + private static File getChunkFile(Container container, BlockID blockID) throws StorageContainerException { + return FILE_PER_BLOCK.getChunkFile(container.getContainerData(), blockID, null); } private static void checkFullDelete(ChunkInfo info, File chunkFile) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index 31a340f310b8..a649f573bf08 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -342,8 +342,7 @@ public void deleteChunks(Container container, BlockData blockData) private static File getChunkFile(KeyValueContainer container, BlockID blockID, ChunkInfo info) throws StorageContainerException { - return FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, - info); + return FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, info.getChunkName()); } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 60dfe8509bda..b24a6f04c488 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.DBHandle; @@ -123,9 +122,7 @@ public void testKeyValueContainerCheckCorruption( assertFalse(block.getChunks().isEmpty()); ContainerProtos.ChunkInfo c = block.getChunks().get(0); BlockID blockID = block.getBlockID(); - ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(c); - File chunkFile = getChunkLayout() - .getChunkFile(containerData, blockID, chunkInfo); + File chunkFile = getChunkLayout().getChunkFile(containerData, blockID, c.getChunkName()); long length = chunkFile.length(); assertThat(length).isGreaterThan(0); // forcefully truncate the file to induce failure. 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index d2206a7fd680..47d24874749e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -76,7 +76,7 @@ public void testReadOversizeChunk() throws IOException { // write chunk bypassing size limit File chunkFile = getStrategy().getLayout() - .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo); + .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo.getChunkName()); FileUtils.writeByteArrayToFile(chunkFile, array); // WHEN+THEN diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java index f83216b7126e..27a0bc81d6f6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java @@ -67,7 +67,7 @@ public void testWriteChunkStageWriteAndCommit() throws Exception { long term = 0; long index = 0; File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK - .getChunkFile(container.getContainerData(), blockID, chunkInfo); + .getChunkFile(container.getContainerData(), blockID, chunkInfo.getChunkName()); File tempChunkFile = new File(chunkFile.getParent(), chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX @@ -109,7 +109,7 @@ public void deletesChunkFileWithLengthIncludingOffset() throws Exception { ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen()); File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile( - container.getContainerData(), blockID, chunkInfo); + container.getContainerData(), blockID, chunkInfo.getChunkName()); ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), null, true); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index b71dd1c01566..012ab989d522 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -43,7 +43,6 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -158,7 +157,7 @@ protected void execute(OzoneClient client, OzoneAddress address) String fileName = containerLayoutVersion.getChunkFile(new File( getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), - 
ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); + chunkInfo.getChunkName()).toString(); chunkPaths.add(fileName); ChunkDetails chunkDetails = new ChunkDetails(); chunkDetails.setChunkName(fileName); From babf85c762ec159b6086145299f9a69b0a27f2ae Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Fri, 23 Feb 2024 20:35:43 +0800 Subject: [PATCH 049/108] HDDS-10403. CopyObject should set ETag based on the key content (#6251) --- .../src/main/smoketest/s3/objectcopy.robot | 14 ++++++++++ .../ozone/s3/endpoint/ObjectEndpoint.java | 17 +++++++----- .../s3/endpoint/ObjectEndpointStreaming.java | 9 ++++--- .../hadoop/ozone/client/OzoneBucketStub.java | 2 +- .../ozone/s3/endpoint/TestObjectPut.java | 27 +++++++++++++++++++ 5 files changed, 58 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot index 21764d65c440..af7571d35b8d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectcopy.robot @@ -37,15 +37,26 @@ Create Dest Bucket Copy Object Happy Scenario Run Keyword if '${DESTBUCKET}' == 'generated1' Create Dest Bucket Execute date > /tmp/copyfile + ${file_checksum} = Execute md5sum /tmp/copyfile | awk '{print $1}' + ${result} = Execute AWSS3ApiCli put-object --bucket ${BUCKET} --key ${PREFIX}/copyobject/key=value/f1 --body /tmp/copyfile + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + ${result} = Execute AWSS3ApiCli list-objects --bucket ${BUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 #copying again will not throw error ${result} = Execute AWSS3ApiCli copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/${PREFIX}/copyobject/key=value/f1 + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" + ${result} = Execute AWSS3ApiCli list-objects --bucket ${DESTBUCKET} --prefix ${PREFIX}/copyobject/key=value/ Should contain ${result} f1 @@ -56,8 +67,11 @@ Copy Object Where Bucket is not available Should contain ${result} NoSuchBucket Copy Object Where both source and dest are same with change to storageclass + ${file_checksum} = Execute md5sum /tmp/copyfile | awk '{print $1}' ${result} = Execute AWSS3APICli copy-object --storage-class REDUCED_REDUNDANCY --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${DESTBUCKET}/${PREFIX}/copyobject/key=value/f1 Should contain ${result} ETag + ${eTag} = Execute and checkrc echo '${result}' | jq -r '.CopyObjectResult.ETag' 0 + Should Be Equal ${eTag} \"${file_checksum}\" Copy Object Where Key not available ${result} = Execute AWSS3APICli and checkrc copy-object --bucket ${DESTBUCKET} --key ${PREFIX}/copyobject/key=value/f1 --copy-source ${BUCKET}/nonnonexistentkey 255 diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 24115abe8e6b..0514125abd10 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -1118,13 +1118,14 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, PerformanceStringBuilder perf, long startNanos) throws IOException { long copyLength; + src = new DigestInputStream(src, E_TAG_PROVIDER.get()); if (datastreamEnabled && !(replication != null && replication.getReplicationType() == EC) && srcKeyLen > datastreamMinLength) { perf.appendStreamMode(); copyLength = ObjectEndpointStreaming .copyKeyWithStream(volume.getBucket(destBucket), destKey, srcKeyLen, - chunkSize, replication, metadata, src, perf, startNanos); + chunkSize, replication, metadata, (DigestInputStream) src, perf, startNanos); } else { try (OzoneOutputStream dest = getClientProtocol() .createKey(volume.getName(), destBucket, destKey, srcKeyLen, @@ -1133,6 +1134,10 @@ void copy(OzoneVolume volume, InputStream src, long srcKeyLen, getMetrics().updateCopyKeyMetadataStats(startNanos); perf.appendMetaLatencyNanos(metadataLatencyNs); copyLength = IOUtils.copyLarge(src, dest); + String eTag = DatatypeConverter.printHexBinary( + ((DigestInputStream) src).getMessageDigest().digest()) + .toLowerCase(); + dest.getMetadata().put(ETAG, eTag); } } getMetrics().incCopyObjectSuccessLength(copyLength); @@ -1151,8 +1156,9 @@ private CopyObjectResponse copyObject(OzoneVolume volume, String sourceBucket = result.getLeft(); String sourceKey = result.getRight(); try { + OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( + volume.getName(), sourceBucket, sourceKey); // Checking whether we trying to copying to it self. 
- if (sourceBucket.equals(destBucket) && sourceKey .equals(destkey)) { // When copying to same storage type when storage type is provided, @@ -1171,15 +1177,12 @@ private CopyObjectResponse copyObject(OzoneVolume volume, // still does not support this just returning dummy response // for now CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(sourceKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(Instant.ofEpochMilli( Time.now())); return copyObjectResponse; } } - - OzoneKeyDetails sourceKeyDetails = getClientProtocol().getKeyDetails( - volume.getName(), sourceBucket, sourceKey); long sourceKeyLen = sourceKeyDetails.getDataSize(); try (OzoneInputStream src = getClientProtocol().getKey(volume.getName(), @@ -1194,7 +1197,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume, getMetrics().updateCopyObjectSuccessStats(startNanos); CopyObjectResponse copyObjectResponse = new CopyObjectResponse(); - copyObjectResponse.setETag(OzoneUtils.getRequestID()); + copyObjectResponse.setETag(wrapInQuotes(destKeyDetails.getMetadata().get(ETAG))); copyObjectResponse.setLastModified(destKeyDetails.getModificationTime()); return copyObjectResponse; } catch (OMException ex) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java index bbb743ee3597..b916fc111d27 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpointStreaming.java @@ -122,15 +122,18 @@ public static long copyKeyWithStream( int bufferSize, ReplicationConfig replicationConfig, Map keyMetadata, - InputStream body, PerformanceStringBuilder perf, long startNanos) + DigestInputStream body, PerformanceStringBuilder perf, long startNanos) throws IOException { - long writeLen = 0; + long writeLen; try (OzoneDataStreamOutput streamOutput = bucket.createStreamKey(keyPath, length, replicationConfig, keyMetadata)) { long metadataLatencyNs = METRICS.updateCopyKeyMetadataStats(startNanos); - perf.appendMetaLatencyNanos(metadataLatencyNs); writeLen = writeToStreamOutput(streamOutput, body, bufferSize, length); + String eTag = DatatypeConverter.printHexBinary(body.getMessageDigest().digest()) + .toLowerCase(); + perf.appendMetaLatencyNanos(metadataLatencyNs); + ((KeyMetadataAware)streamOutput).getMetadata().put(OzoneConsts.ETAG, eTag); } return writeLen; } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 39ae9cc4af17..0cbe0781c4ba 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -209,7 +209,7 @@ public OzoneDataStreamOutput createStreamKey(String key, long size, Map keyMetadata) throws IOException { ByteBufferStreamOutput byteBufferStreamOutput = - new ByteBufferStreamOutput() { + new KeyMetadataAwareByteBufferStreamOutput(keyMetadata) { private final ByteBuffer buffer = ByteBuffer.allocate((int) size); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index ae8279f25861..0daa666ae4c7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -26,9 +26,11 @@ import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response; import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; @@ -52,7 +54,9 @@ import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.eq; @@ -108,9 +112,12 @@ public void testPutObject() throws IOException, OS3Exception { .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } @Test @@ -136,9 +143,12 @@ public void testPutObjectWithECReplicationConfig() .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } @Test @@ -208,9 +218,12 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { clientStub.getObjectStore().getS3Bucket(bucketName) .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals("1234567890abcde", keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); } @Test @@ -230,10 +243,14 @@ public void testCopyObject() throws IOException, OS3Exception { .readKey(keyName); String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails keyDetails = clientStub.getObjectStore().getS3Bucket(bucketName).getKey(keyName); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + String sourceETag = keyDetails.getMetadata().get(OzoneConsts.ETAG); // Add copy header, and then call put when(headers.getHeaderString(COPY_SOURCE_HEADER)).thenReturn( @@ -247,9 +264,19 @@ public void 
testCopyObject() throws IOException, OS3Exception { .readKey(destkey); keyContent = IOUtils.toString(ozoneInputStream, UTF_8); + OzoneKeyDetails sourceKeyDetails = clientStub.getObjectStore() + .getS3Bucket(bucketName).getKey(keyName); + OzoneKeyDetails destKeyDetails = clientStub.getObjectStore() + .getS3Bucket(destBucket).getKey(destkey); assertEquals(200, response.getStatus()); assertEquals(CONTENT, keyContent); + assertNotNull(keyDetails.getMetadata()); + assertTrue(StringUtils.isNotEmpty(keyDetails.getMetadata().get(OzoneConsts.ETAG))); + // Source key eTag should remain unchanged and the dest key should have + // the same Etag since the key content is the same + assertEquals(sourceETag, sourceKeyDetails.getMetadata().get(OzoneConsts.ETAG)); + assertEquals(sourceETag, destKeyDetails.getMetadata().get(OzoneConsts.ETAG)); // source and dest same OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( From e9f11f0b5fe07acc380eb3f5dfea544d794d4812 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Fri, 23 Feb 2024 23:49:21 +0800 Subject: [PATCH 050/108] HDDS-10404. Ozone admin reconfig command fails with security enabled (#6253) --- .../apache/hadoop/hdds/HddsConfigKeys.java | 3 + .../src/main/resources/ozone-default.xml | 8 +++ .../HddsDatanodeClientProtocolServer.java | 11 +++- .../hadoop/ozone/HddsPolicyProvider.java | 63 +++++++++++++++++++ .../docs/content/feature/Reconfigurability.md | 27 ++++---- .../content/feature/Reconfigurability.zh.md | 27 ++++---- ...nfigureProtocolClientSideTranslatorPB.java | 44 +++++++++---- .../ReconfigureProtocolDatanodePB.java | 33 ++++++++++ .../protocolPB/ReconfigureProtocolOmPB.java | 33 ++++++++++ .../protocolPB/ReconfigureProtocolPB.java | 2 +- ...nfigureProtocolServerSideTranslatorPB.java | 2 +- .../hdds/scm/server/SCMPolicyProvider.java | 7 ++- .../main/compose/ozonesecure-ha/docker-config | 1 + .../compose/ozonesecure-ha/test-leadership.sh | 2 +- .../main/compose/ozonesecure/docker-config | 1 + .../main/smoketest/admincli/reconfigure.robot | 38 +++++++++++ hadoop-ozone/dist/src/shell/ozone/ozone | 1 + .../hadoop/ozone/shell/TestReconfigShell.java | 12 ++-- .../hadoop/ozone/om/OMPolicyProvider.java | 6 +- .../apache/hadoop/ozone/om/OzoneManager.java | 4 +- .../AbstractReconfigureSubCommand.java | 5 +- .../admin/reconfig/ReconfigureCommands.java | 10 +++ .../ReconfigurePropertiesSubcommand.java | 5 +- .../reconfig/ReconfigureStartSubcommand.java | 5 +- .../reconfig/ReconfigureStatusSubcommand.java | 5 +- .../reconfig/ReconfigureSubCommandUtil.java | 10 +-- 26 files changed, 298 insertions(+), 67 deletions(-) create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/reconfigure.robot diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index 787f023df2ea..d0c31bf2884f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -338,6 +338,9 @@ private HddsConfigKeys() { HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL = 
"hdds.security.client.scm.secretkey.datanode.protocol.acl"; + public static final String OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL = + "ozone.security.reconfigure.protocol.acl"; + // Determines if the Container Chunk Manager will write user data to disk // Set to false only for specific performance tests public static final String HDDS_CONTAINER_PERSISTDATA = diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 251ad5c2ff30..e5e3726beb5d 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2782,6 +2782,14 @@ manager admin protocol. + + ozone.security.reconfigure.protocol.acl + * + SECURITY + + Comma separated list of users and groups allowed to access reconfigure protocol. + + hdds.datanode.http.auth.kerberos.principal diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java index e26610b357e7..8b0b3a7ca239 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeClientProtocolServer.java @@ -20,12 +20,13 @@ import com.google.protobuf.BlockingService; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos; -import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; +import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolDatanodePB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; @@ -66,6 +67,10 @@ protected HddsDatanodeClientProtocolServer( HDDS_DATANODE_CLIENT_ADDRESS_KEY, HddsUtils.getDatanodeRpcAddress(conf), rpcServer); datanodeDetails.setPort(CLIENT_RPC, clientRpcAddress.getPort()); + if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, + false)) { + rpcServer.refreshServiceAcl(conf, HddsPolicyProvider.getInstance()); + } } public void start() { @@ -97,7 +102,7 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration, InetSocketAddress rpcAddress = HddsUtils.getDatanodeRpcAddress(conf); // Add reconfigureProtocolService. 
RPC.setProtocolEngine( - configuration, ReconfigureProtocolPB.class, ProtobufRpcEngine.class); + configuration, ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class); final int handlerCount = conf.getInt(HDDS_DATANODE_HANDLER_COUNT_KEY, HDDS_DATANODE_HANDLER_COUNT_DEFAULT); @@ -108,7 +113,7 @@ private RPC.Server getRpcServer(OzoneConfiguration configuration, reconfigureServerProtocol); return preserveThreadName(() -> startRpcServer(configuration, rpcAddress, - ReconfigureProtocolPB.class, reconfigureService, handlerCount)); + ReconfigureProtocolDatanodePB.class, reconfigureService, handlerCount)); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java new file mode 100644 index 000000000000..eeed4fab5f72 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsPolicyProvider.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone; + + +import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; +import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.security.authorize.PolicyProvider; +import org.apache.hadoop.security.authorize.Service; +import org.apache.ratis.util.MemoizedSupplier; + +import java.util.Arrays; +import java.util.List; +import java.util.function.Supplier; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; + +/** + * {@link PolicyProvider} for Datanode protocols. 
+ */ +@Private +@Unstable +public final class HddsPolicyProvider extends PolicyProvider { + + private static final Supplier SUPPLIER = + MemoizedSupplier.valueOf(HddsPolicyProvider::new); + + private HddsPolicyProvider() { + } + + @Private + @Unstable + public static HddsPolicyProvider getInstance() { + return SUPPLIER.get(); + } + + private static final List DN_SERVICES = + Arrays.asList( + new Service( + OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) + ); + + @Override + public Service[] getServices() { + return DN_SERVICES.toArray(new Service[0]); + } +} diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.md b/hadoop-hdds/docs/content/feature/Reconfigurability.md index af220554ba82..8aa0579139de 100644 --- a/hadoop-hdds/docs/content/feature/Reconfigurability.md +++ b/hadoop-hdds/docs/content/feature/Reconfigurability.md @@ -28,10 +28,11 @@ If a property is reconfigurable, you can modify it in the configuration file (`o command: ```shell -ozone admin reconfig --address= start|status|properties +ozone admin reconfig --service=[OM|SCM|DATANODE] --address= start|status|properties ``` The meaning of command options: +- **--service**: The node type of the server specified with --address - **--address**: RPC address for one server - Three operations are provided: - **start**: Execute the reconfig operation asynchronously @@ -40,60 +41,60 @@ The meaning of command options: ## Retrieve the reconfigurable properties list To retrieve all the reconfigurable properties list for a specific component in Ozone, -you can use the command: `ozone admin reconfig --address= properties`. +you can use the command: `ozone admin reconfig --service=[OM|SCM|DATANODE] --address= properties`. This command will list all the properties that can be dynamically reconfigured at runtime for specific component.
> For example, get the Ozone OM reconfigurable properties list. > ->$ `ozone admin reconfig --address=hadoop1:9862 properties`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## OM Reconfigurability >For example, modify `ozone.administrators` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:9862 start`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 start`
OM: Started OM reconfiguration task on node [hadoop1:9862]. > ->$ `ozone admin reconfig --address=hadoop1:9862 status`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 status`
OM: Reconfiguring status for node [hadoop1:9862]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig -address=hadoop1:9862 properties`
+> $ `ozone admin reconfig --service=OM -address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## SCM Reconfigurability >For example, modify `ozone.administrators` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:9860 start`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 start`
SCM: Started OM reconfiguration task on node [hadoop1:9860]. > ->$ `ozone admin reconfig --address=hadoop1:9860 status`
+>$ `ozone admin reconfig --service=SCM --address=hadoop1:9860 status`
SCM: Reconfiguring status for node [hadoop1:9860]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig -address=hadoop1:9860 properties`
+> $ `ozone admin reconfig --service=SCM -address=hadoop1:9860 properties`
SCM: Node [hadoop1:9860] Reconfigurable properties:
ozone.administrators ## Datanode Reconfigurability >For example, modify `ozone.example.config` in ozone-site.xml and execute: > -> $ `ozone admin reconfig --address=hadoop1:19864 start`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 start`
Datanode: Started reconfiguration task on node [hadoop1:19864]. > ->$ `ozone admin reconfig --address=hadoop1:19864 status`
+>$ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 status`
Datanode: Reconfiguring status for node [hadoop1:19864]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.example.config
From: "old"
To: "new" > -> $ `ozone admin reconfig -address=hadoop1:19864 properties`
+> $ `ozone admin reconfig --service=DATANODE -address=hadoop1:19864 properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config @@ -104,7 +105,7 @@ Currently, only Datanode supports batch operations
>For example, to list the reconfigurable properties of all Datanodes:
-> $ `ozone admin reconfig --in-service-datanodes properties`
+> $ `ozone admin reconfig --service=DATANODE --in-service-datanodes properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
Datanode: Node [hadoop2:19864] Reconfigurable properties:
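The `--service` option documented above is parsed on the client side and mapped to a node-type value before any connection is made. Below is a minimal picocli sketch of that option-to-enum pattern, using hypothetical command and class names rather than the real ReconfigureCommands:

```java
import java.util.concurrent.Callable;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

// Sketch only: picocli converts the --service value to the enum automatically.
@Command(name = "reconfig-sketch", mixinStandardHelpOptions = true)
public class ReconfigSketch implements Callable<Integer> {

  enum NodeType { OM, SCM, DATANODE }

  @Option(names = "--service", required = true,
      description = "Node type of the target server: OM, SCM or DATANODE.")
  private NodeType service;

  @Option(names = "--address", description = "host:port of the target node.")
  private String address;

  @Override
  public Integer call() {
    System.out.printf("Would reconfigure %s at %s%n", service, address);
    return 0;
  }

  public static void main(String[] args) {
    // e.g. java ReconfigSketch --service=OM --address=hadoop1:9862
    System.exit(new CommandLine(new ReconfigSketch()).execute(args));
  }
}
```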
diff --git a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md index 8e983a98ab8d..957f0510548e 100644 --- a/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md +++ b/hadoop-hdds/docs/content/feature/Reconfigurability.zh.md @@ -27,10 +27,11 @@ Ozone支持在不重启服务的情况下动态加载某些配置。如果某个 命令: ```shell -ozone admin reconfig --address= start|status|properties +ozone admin reconfig --service=[OM|SCM|DATANODE] --address= start|status|properties ``` 命令项的含义: +- **--service**: --address 指定节点的Ozone服务类型 - **--address**: 一台服务所在的主机与客户端通信的RPC地址 - 提供3中操作: - **start**: 开始异步执行动态加载配置 @@ -38,44 +39,44 @@ ozone admin reconfig --address= start|status|properties - **properties**: 列出支持动态加载的配置项 ## 获取可动态加载的属性列表 -要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --address= properties`。 +要获取 Ozone 中指定组件的可动态加载属性列表, 可以使用命令 `ozone admin reconfig --service=[OM|SCM|DATANODE] --address= properties`。 这个命令将会列出所有可以在运行时动态加载的属性。 > 例如, 获取 Ozone OM 可动态加载属性列表 > ->$ `ozone admin reconfig --address=hadoop1:9862 properties`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## OM动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:9862 start`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 start`
OM: Started reconfiguration task on node [hadoop1:9862]. > ->$ `ozone admin reconfig --address=hadoop1:9862 status`
+>$ `ozone admin reconfig --service=OM --address=hadoop1:9862 status`
OM: Reconfiguring status for node [hadoop1:9862]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig --address=hadoop1:9862 properties`
+> $ `ozone admin reconfig --service=OM --address=hadoop1:9862 properties`
OM: Node [hadoop1:9862] Reconfigurable properties:
ozone.administrators ## SCM动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.administrators`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:9860 start`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 start`
SCM: Started reconfiguration task on node [hadoop1:9860]. > ->$ `ozone admin reconfig --address=hadoop1:9860 status`
+>$ `ozone admin reconfig --service=SCM --address=hadoop1:9860 status`
SCM: Reconfiguring status for node [hadoop1:9860]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.administrators
From: "hadoop"
To: "hadoop,bigdata" > -> $ `ozone admin reconfig --address=hadoop1:9860 properties`
+> $ `ozone admin reconfig --service=SCM --address=hadoop1:9860 properties`
SCM: Node [hadoop1:9860] Reconfigurable properties:
ozone.administrators @@ -83,16 +84,16 @@ ozone.administrators ## Datanode 动态配置 >例如, 在`ozone-site.xml`文件中修改`ozone.example.config`的值并执行: > -> $ `ozone admin reconfig --address=hadoop1:19864 start`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 start`
Datanode: Started reconfiguration task on node [hadoop1:19864]. > ->$ `ozone admin reconfig --address=hadoop1:19864 status`
+>$ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 status`
Datanode: Reconfiguring status for node [hadoop1:19864]: started at Wed Dec 28 19:04:44 CST 2022 and finished at Wed Dec 28 19:04:44 CST 2022.
SUCCESS: Changed property ozone.example.config
From: "old"
To: "new" > -> $ `ozone admin reconfig --address=hadoop1:19864 properties`
+> $ `ozone admin reconfig --service=DATANODE --address=hadoop1:19864 properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config @@ -104,7 +105,7 @@ ozone.example.config >例如, 列出 Datanode 所有可配置的属性:
-> $ `ozone admin reconfig --in-service-datanodes properties`
+> $ `ozone admin reconfig --service=DATANODE --in-service-datanodes properties`
Datanode: Node [hadoop1:19864] Reconfigurable properties:
ozone.example.config
Datanode: Node [hadoop2:19864] Reconfigurable properties:
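The client-side translator changed in the diff that follows picks a different protocol interface per node type, so each server's Kerberos principal and service ACL are checked against the right protocol. A stripped-down sketch of that per-node-type dispatch, with hypothetical stand-in types instead of the real Hadoop RPC classes:

```java
// Sketch only: per-node-type selection of the reconfigure proxy.
public final class ProxySelectionSketch {

  enum NodeType { OM, SCM, DATANODE }

  // Stand-in for the reconfigure protocol client proxy.
  interface ReconfigureProxy {
    String serverName();
  }

  static ReconfigureProxy createProxy(NodeType type, String address) {
    switch (type) {
      case OM:
        return () -> "OM@" + address;        // would bind the OM-specific PB interface
      case DATANODE:
        return () -> "Datanode@" + address;  // would bind the datanode-specific PB interface
      default:
        return () -> "SCM@" + address;       // SCM keeps the base PB interface
    }
  }

  public static void main(String[] args) {
    System.out.println(createProxy(NodeType.DATANODE, "hadoop1:19864").serverName());
  }
}
```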
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java index 85acc1431fc8..0ab92cfee02c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolClientSideTranslatorPB.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetServerNameRequestProto; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetServerNameResponseProto; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.GetConfigurationChangeProto; @@ -82,26 +83,45 @@ public class ReconfigureProtocolClientSideTranslatorPB implements private final ReconfigureProtocolPB rpcProxy; - public ReconfigureProtocolClientSideTranslatorPB(InetSocketAddress addr, + public ReconfigureProtocolClientSideTranslatorPB(HddsProtos.NodeType nodeType, InetSocketAddress addr, UserGroupInformation ugi, OzoneConfiguration conf) throws IOException { - rpcProxy = createReconfigureProtocolProxy(addr, ugi, conf); + rpcProxy = createReconfigureProtocolProxy(nodeType, addr, ugi, conf); } - static ReconfigureProtocolPB createReconfigureProtocolProxy( + static ReconfigureProtocolPB createReconfigureProtocolProxy(HddsProtos.NodeType nodeType, InetSocketAddress addr, UserGroupInformation ugi, OzoneConfiguration conf) throws IOException { - - RPC.setProtocolEngine(OzoneConfiguration.of(conf), - ReconfigureProtocolPB.class, ProtobufRpcEngine.class); Configuration hadoopConf = LegacyHadoopConfigurationSource .asHadoopConfiguration(conf); - return RPC.getProtocolProxy( - ReconfigureProtocolPB.class, - RPC.getProtocolVersion(ReconfigureProtocolPB.class), - addr, ugi, hadoopConf, - NetUtils.getDefaultSocketFactory(hadoopConf)) - .getProxy(); + if (nodeType == HddsProtos.NodeType.OM) { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolOmPB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolOmPB.class, + RPC.getProtocolVersion(ReconfigureProtocolOmPB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } else if (nodeType == HddsProtos.NodeType.DATANODE) { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolDatanodePB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolDatanodePB.class, + RPC.getProtocolVersion(ReconfigureProtocolDatanodePB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } else { + RPC.setProtocolEngine(OzoneConfiguration.of(conf), + ReconfigureProtocolPB.class, ProtobufRpcEngine.class); + return RPC.getProtocolProxy( + ReconfigureProtocolPB.class, + RPC.getProtocolVersion(ReconfigureProtocolPB.class), + addr, ugi, hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf)) + .getProxy(); + } } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java new file mode 100644 index 000000000000..49e95b9c26f2 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolDatanodePB.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +/** + * Protocol that clients use to communicate with the DN to do + * reconfiguration on the fly. + */ +@ProtocolInfo( + protocolName = "org.apache.hadoop.hdds.protocol.ReconfigureProtocol", + protocolVersion = 1) +@KerberosInfo(serverPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY) +public interface ReconfigureProtocolDatanodePB extends ReconfigureProtocolPB { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java new file mode 100644 index 000000000000..2775e71efa74 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolOmPB.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +/** + * Protocol that clients use to communicate with the OM to do + * reconfiguration on the fly. + */ +@ProtocolInfo( + protocolName = "org.apache.hadoop.hdds.protocol.ReconfigureProtocol", + protocolVersion = 1) +// TODO: move OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY to hdds-common. 
+@KerberosInfo(serverPrincipal = "ozone.om.kerberos.principal") +public interface ReconfigureProtocolOmPB extends ReconfigureProtocolPB { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java index e1702ce0ada8..cb31a366ad7d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolPB.java @@ -23,7 +23,7 @@ import org.apache.hadoop.security.KerberosInfo; /** - * Protocol that clients use to communicate with the OM/SCM to do + * Protocol that clients use to communicate with the SCM to do * reconfiguration on the fly. */ @ProtocolInfo( diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java index 8db07cbc80f3..7a6a5a904244 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/ReconfigureProtocolServerSideTranslatorPB.java @@ -45,7 +45,7 @@ * ReconfigureProtocol. */ public class ReconfigureProtocolServerSideTranslatorPB implements - ReconfigureProtocolPB { + ReconfigureProtocolPB, ReconfigureProtocolOmPB, ReconfigureProtocolDatanodePB { private final ReconfigureProtocol impl; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java index 0ea2d0e9559b..9cbd6d97deda 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.hdds.protocol.SecretKeyProtocolDatanode; import org.apache.hadoop.hdds.protocol.SecretKeyProtocolOm; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; @@ -43,6 +44,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_OM_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_SCM_PROTOCOL_ACL; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; /** * {@link PolicyProvider} for SCM protocols. 
@@ -85,7 +87,10 @@ public static SCMPolicyProvider getInstance() { SecretKeyProtocolScm.class), new Service( HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL, - SecretKeyProtocolDatanode.class) + SecretKeyProtocolDatanode.class), + new Service( + OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) ); @Override diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index b9fc5344c722..20b37c78fc25 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -138,6 +138,7 @@ HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* +HADOOP-POLICY.XML_ozone.security.reconfigure.protocol.acl=* HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-leadership.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-leadership.sh index 605d8ebcfbd1..1ab13ce6ff14 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-leadership.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/test-leadership.sh @@ -31,7 +31,7 @@ start_docker_env execute_robot_test s3g kinit.robot -execute_robot_test s3g admincli +execute_robot_test s3g admincli/scmrole.robot execute_robot_test s3g omha/om-fetch-key.robot diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index 53f1a63d97ae..4c8ce1a27400 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -130,6 +130,7 @@ HADOOP-POLICY.XML_hdds.security.client.datanode.container.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.scm.container.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.scm.block.protocol.acl=* HADOOP-POLICY.XML_hdds.security.client.scm.certificate.protocol.acl=* +HADOOP-POLICY.XML_ozone.security.reconfigure.protocol.acl=* HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/reconfigure.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/reconfigure.robot new file mode 100644 index 000000000000..0bf2feead607 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/reconfigure.robot @@ -0,0 +1,38 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test ozone admin command +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Keywords *** +Setup Test + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + +*** Test Cases *** +Reconfigure OM + Pass Execution If '${SECURITY_ENABLED}' == 'false' N/A + ${output} = Execute ozone admin reconfig --address=om:9862 --service=OM start + Should Contain ${output} Started reconfiguration task on node +Reconfigure SCM + Pass Execution If '${SECURITY_ENABLED}' == 'false' N/A + ${output} = Execute ozone admin reconfig --address=scm:9860 --service=SCM start + Should Contain ${output} Started reconfiguration task on node +Reconfigure DN + Pass Execution If '${SECURITY_ENABLED}' == 'false' N/A + ${output} = Execute ozone admin reconfig --address=datanode:19864 --service=DATANODE start + Should Contain ${output} Started reconfiguration task on node \ No newline at end of file diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 8686f56c287a..92edee8372f3 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -213,6 +213,7 @@ function ozonecmd_case ;; admin) OZONE_CLASSNAME=org.apache.hadoop.hdds.cli.OzoneAdmin + OZONE_ADMIN_OPTS="${OZONE_ADMIN_OPTS} ${OZONE_MODULE_ACCESS_ARGS}" OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; debug) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 427b36d9a952..3a36f8eaba3c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -95,7 +95,7 @@ public void testDataNodeGetReconfigurableProperties() throws Exception { HddsDatanodeClientProtocolServer server = datanodeService.getClientProtocolServer(); InetSocketAddress socket = server.getClientRpcAddress(); - executeAndAssertProperties(datanodeService.getReconfigurationHandler(), + executeAndAssertProperties(datanodeService.getReconfigurationHandler(), "--service=DATANODE", socket, capture); } } @@ -105,7 +105,7 @@ public void testDataNodeGetReconfigurableProperties() throws Exception { public void testOzoneManagerGetReconfigurationProperties() throws Exception { try (SystemOutCapturer capture = new SystemOutCapturer()) { InetSocketAddress socket = ozoneManager.getOmRpcServerAddr(); - executeAndAssertProperties(ozoneManager.getReconfigurationHandler(), + executeAndAssertProperties(ozoneManager.getReconfigurationHandler(), "--service=OM", socket, capture); } } @@ -116,17 +116,17 @@ public void testStorageContainerManagerGetReconfigurationProperties() try (SystemOutCapturer capture = new SystemOutCapturer()) { InetSocketAddress socket = storageContainerManager.getClientRpcAddress(); executeAndAssertProperties( - storageContainerManager.getReconfigurationHandler(), socket, capture); + storageContainerManager.getReconfigurationHandler(), "--service=SCM", socket, capture); } } private void executeAndAssertProperties( - ReconfigurableBase reconfigurableBase, + ReconfigurableBase reconfigurableBase, String service, InetSocketAddress socket, SystemOutCapturer capture) throws UnsupportedEncodingException { String address = socket.getHostString() + ":" + socket.getPort(); ozoneAdmin.execute( - new String[] {"reconfig", "--address", 
address, "properties"}); + new String[] {"reconfig", service, "--address", address, "properties"}); assertReconfigurablePropertiesOutput( reconfigurableBase.getReconfigurableProperties(), capture.getOutput()); } @@ -171,7 +171,7 @@ private void executeAndAssertBulkReconfigCount(int except) throws Exception { try (SystemOutCapturer capture = new SystemOutCapturer()) { ozoneAdmin.execute(new String[] { - "reconfig", "--in-service-datanodes", "properties"}); + "reconfig", "--service=DATANODE", "--in-service-datanodes", "properties"}); String output = capture.getOutput(); assertThat(capture.getOutput()).contains(String.format("successfully %d", except)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java index 970cd8b95f16..66c525f0712a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPolicyProvider.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.ozone.om.protocol.OMInterServiceProtocol; import org.apache.hadoop.ozone.om.protocol.OMAdminProtocol; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; @@ -31,6 +32,7 @@ import java.util.List; import java.util.function.Supplier; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL; import static org.apache.hadoop.ozone.om.OMConfigKeys .OZONE_OM_SECURITY_CLIENT_PROTOCOL_ACL; @@ -61,7 +63,9 @@ public static OMPolicyProvider getInstance() { new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL, OMInterServiceProtocol.class), new Service(OZONE_OM_SECURITY_ADMIN_PROTOCOL_ACL, - OMAdminProtocol.class) + OMAdminProtocol.class), + new Service(OZONE_SECURITY_RECONFIGURE_PROTOCOL_ACL, + ReconfigureProtocol.class) ); @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index c4e9eb2ed3e2..fda68b416e4f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -75,7 +75,7 @@ import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService; -import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; +import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolOmPB; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.ratis.RatisHelper; @@ -1283,7 +1283,7 @@ private RPC.Server startRpcServer(OzoneConfiguration conf, interOMProtocolService, rpcServer); HddsServerUtil.addPBProtocol(conf, OMAdminProtocolPB.class, omAdminProtocolService, rpcServer); - HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolPB.class, + 
HddsServerUtil.addPBProtocol(conf, ReconfigureProtocolOmPB.class, reconfigureProtocolService, rpcServer); if (conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java index 99af758b5bad..0a2666d30ee2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/AbstractReconfigureSubCommand.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.admin.reconfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine; import java.util.List; @@ -44,10 +45,10 @@ public Void call() throws Exception { " --in-service-datanodes is not given."); return null; } - executeCommand(parent.getAddress()); + executeCommand(parent.getService(), parent.getAddress()); } return null; } - protected abstract void executeCommand(String address); + protected abstract void executeCommand(HddsProtos.NodeType nodeType, String address); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java index 0c25b1f67b3b..fc171e52d8d3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.client.ScmClient; import org.kohsuke.MetaInfServices; @@ -56,6 +57,11 @@ public class ReconfigureCommands implements Callable, @Spec private CommandSpec spec; + @CommandLine.Option(names = {"--service"}, + description = "service: OM, SCM, DATANODE.", + required = true) + private String service; + @CommandLine.Option(names = {"--address"}, description = "node address: or .", required = false) @@ -77,6 +83,10 @@ public String getAddress() { return address; } + public HddsProtos.NodeType getService() { + return HddsProtos.NodeType.valueOf(service); + } + @Override public Class getParentType() { return OzoneAdmin.class; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java index 60bc9c2ef557..99450715ac98 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigurePropertiesSubcommand.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -36,9 +37,9 @@ public class ReconfigurePropertiesSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String 
address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); List properties = reconfigProxy.listReconfigureProperties(); System.out.printf("%s: Node [%s] Reconfigurable properties:%n", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java index 86d95bf06457..ae2e5a1a7432 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStartSubcommand.java @@ -19,6 +19,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -34,9 +35,9 @@ public class ReconfigureStartSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); reconfigProxy.startReconfigure(); System.out.printf("%s: Started reconfiguration task on node [%s].%n", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java index 20e0ee8281cf..07bd2d6f4ac6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureStatusSubcommand.java @@ -21,6 +21,7 @@ import org.apache.hadoop.conf.ReconfigurationUtil; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.ReconfigureProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import picocli.CommandLine.Command; import java.io.IOException; @@ -40,9 +41,9 @@ public class ReconfigureStatusSubcommand extends AbstractReconfigureSubCommand { @Override - protected void executeCommand(String address) { + protected void executeCommand(HddsProtos.NodeType nodeType, String address) { try (ReconfigureProtocol reconfigProxy = ReconfigureSubCommandUtil - .getSingleNodeReconfigureProxy(address)) { + .getSingleNodeReconfigureProxy(nodeType, address)) { String serverName = reconfigProxy.getServerName(); ReconfigurationTaskStatus status = reconfigProxy.getReconfigureStatus(); System.out.printf("%s: Reconfiguring status for node [%s]: ", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java index e7e1860c2cb7..b24190dceacd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureSubCommandUtil.java @@ -34,7 +34,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Consumer; +import java.util.function.BiConsumer; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState; @@ -47,23 +47,23 @@ private ReconfigureSubCommandUtil() { } public static ReconfigureProtocol getSingleNodeReconfigureProxy( - String address) throws IOException { + HddsProtos.NodeType nodeType, String address) throws IOException { OzoneConfiguration ozoneConf = new OzoneConfiguration(); UserGroupInformation user = UserGroupInformation.getCurrentUser(); InetSocketAddress nodeAddr = NetUtils.createSocketAddr(address); - return new ReconfigureProtocolClientSideTranslatorPB( + return new ReconfigureProtocolClientSideTranslatorPB(nodeType, nodeAddr, user, ozoneConf); } public static void parallelExecute(ExecutorService executorService, - List nodes, Consumer operation) { + List nodes, BiConsumer operation) { AtomicInteger successCount = new AtomicInteger(); AtomicInteger failCount = new AtomicInteger(); if (nodes != null) { for (T node : nodes) { executorService.submit(() -> { try { - operation.accept(node); + operation.accept(HddsProtos.NodeType.DATANODE, node); successCount.incrementAndGet(); } catch (Exception e) { failCount.incrementAndGet(); From 0bac7ef8e47cd4c685e38ca8d2a618baf87f8cf4 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 23 Feb 2024 22:00:53 +0100 Subject: [PATCH 051/108] HDDS-10405. ozone admin has hard-coded info loglevel (#6254) --- .../apache/hadoop/hdds/cli/OzoneAdmin.java | 12 --- .../ReplicationManagerStartSubcommand.java | 7 +- .../ReplicationManagerStatusSubcommand.java | 9 +- .../cli/ReplicationManagerStopSubcommand.java | 9 +- .../hdds/scm/cli/SafeModeCheckSubcommand.java | 11 +-- .../hdds/scm/cli/SafeModeExitSubcommand.java | 7 +- .../hdds/scm/cli/SafeModeWaitSubcommand.java | 19 ++--- .../cli/cert/CleanExpiredCertsSubcommand.java | 9 +- .../hdds/scm/cli/cert/InfoSubcommand.java | 16 +--- .../hdds/scm/cli/cert/ListSubcommand.java | 11 +-- .../hdds/scm/cli/cert/ScmCertSubcommand.java | 21 +++-- .../scm/cli/container/CreateSubcommand.java | 7 +- .../scm/cli/container/InfoSubcommand.java | 35 ++++---- .../scm/cli/container/ListSubcommand.java | 7 +- .../scm/cli/container/TestInfoSubCommand.java | 85 +++++-------------- 15 files changed, 73 insertions(+), 192 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java index 093dd93430b9..cc496a28e777 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -22,13 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.log4j.ConsoleAppender; -import org.apache.log4j.Level; -import org.apache.log4j.LogManager; -import org.apache.log4j.Logger; -import org.apache.log4j.PatternLayout; import picocli.CommandLine; /** @@ -75,12 +69,6 @@ public UserGroupInformation getUser() throws IOException { * @param argv - System Args Strings[] */ public static 
void main(String[] argv) { - LogManager.resetConfiguration(); - Logger.getRootLogger().setLevel(Level.INFO); - Logger.getRootLogger() - .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); - Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - new OzoneAdmin().run(argv); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java index ff82b82ec87a..29f2f3d45727 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStartSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,12 +33,9 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStartSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStartSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { scmClient.startReplicationManager(); - LOG.info("Starting ReplicationManager..."); + System.out.println("Starting ReplicationManager..."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index 9bc3649dd9f0..b2e308e14227 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import java.io.IOException; @@ -35,18 +33,15 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStatusSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStatusSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getReplicationManagerStatus(); // Output data list if (execReturn) { - LOG.info("ReplicationManager is Running."); + System.out.println("ReplicationManager is Running."); } else { - LOG.info("ReplicationManager is Not Running."); + System.out.println("ReplicationManager is Not Running."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java index 7d3063a7636c..12de13c07d26 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStopSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import 
java.io.IOException; @@ -35,14 +33,11 @@ versionProvider = HddsVersionProvider.class) public class ReplicationManagerStopSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ReplicationManagerStopSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { scmClient.stopReplicationManager(); - LOG.info("Stopping ReplicationManager..."); - LOG.info("Requested SCM to stop ReplicationManager, " + + System.out.println("Stopping ReplicationManager..."); + System.out.println("Requested SCM to stop ReplicationManager, " + "it might take sometime for the ReplicationManager to stop."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index db2f02c5e125..747215dcac71 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -24,8 +24,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; @@ -39,9 +37,6 @@ versionProvider = HddsVersionProvider.class) public class SafeModeCheckSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeCheckSubcommand.class); - @CommandLine.Option(names = {"--verbose"}, description = "Show detailed status of rules.") private boolean verbose; @@ -52,17 +47,17 @@ public void execute(ScmClient scmClient) throws IOException { // Output data list if (execReturn) { - LOG.info("SCM is in safe mode."); + System.out.println("SCM is in safe mode."); if (verbose) { for (Map.Entry> entry : scmClient.getSafeModeRuleStatuses().entrySet()) { Pair value = entry.getValue(); - LOG.info("validated:{}, {}, {}", + System.out.printf("validated:%s, %s, %s%n", value.getLeft(), entry.getKey(), value.getRight()); } } } else { - LOG.info("SCM is out of safe mode."); + System.out.println("SCM is out of safe mode."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index bcf64deb85e2..e4173c9767e3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -22,8 +22,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; /** @@ -36,14 +34,11 @@ versionProvider = HddsVersionProvider.class) public class SafeModeExitSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeExitSubcommand.class); - @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.forceExitSafeMode(); if (execReturn) { - LOG.info("SCM exit safe mode successfully."); + System.out.println("SCM exit safe mode successfully."); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java index abaca08cfbb9..ad94d4fffd0d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeWaitSubcommand.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Mixin; @@ -39,9 +37,6 @@ versionProvider = HddsVersionProvider.class) public class SafeModeWaitSubcommand implements Callable { - private static final Logger LOG = - LoggerFactory.getLogger(SafeModeWaitSubcommand.class); - @Option(description = "Define timeout (in second) to wait until (exit code 1) " + "or until safemode is ended (exit code 0).", defaultValue = "30", @@ -62,26 +57,26 @@ public Void call() throws Exception { long remainingTime; do { if (!scmClient.inSafeMode()) { - LOG.info("SCM is out of safe mode."); + System.out.println("SCM is out of safe mode."); return null; } remainingTime = getRemainingTimeInSec(); if (remainingTime > 0) { - LOG.info( + System.out.printf( "SCM is in safe mode. Will retry in 1 sec. Remaining time " - + "(sec): {}", + + "(sec): %s%n", remainingTime); Thread.sleep(1000); } else { - LOG.info("SCM is in safe mode. No more retries."); + System.out.println("SCM is in safe mode. No more retries."); } } while (remainingTime > 0); } catch (InterruptedException ex) { - LOG.info( - "SCM is not available (yet?). Error is {}. Will retry in 1 sec. " - + "Remaining time (sec): {}", + System.out.printf( + "SCM is not available (yet?). Error is %s. Will retry in 1 sec. 
" + + "Remaining time (sec): %s%n", ex.getMessage(), getRemainingTimeInSec()); Thread.sleep(1000); Thread.currentThread().interrupt(); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java index cab7a29a4ea6..09caf8147ad4 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CleanExpiredCertsSubcommand.java @@ -19,8 +19,6 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import java.io.IOException; @@ -36,13 +34,10 @@ versionProvider = HddsVersionProvider.class) public class CleanExpiredCertsSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(CleanExpiredCertsSubcommand.class); - @Override protected void execute(SCMSecurityProtocol client) throws IOException { List pemEncodedCerts = client.removeExpiredCertificates(); - LOG.info("List of removed expired certificates:"); - printCertList(LOG, pemEncodedCerts); + System.out.println("List of removed expired certificates:"); + printCertList(pemEncodedCerts); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java index 6177c8f7ff4e..c708d424d9c9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/InfoSubcommand.java @@ -26,12 +26,8 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Parameters; -import picocli.CommandLine.Spec; /** * This is the handler that process certificate info command. @@ -44,12 +40,6 @@ class InfoSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - - @Spec - private CommandSpec spec; - @Parameters(description = "Serial id of the certificate in decimal.") private String serialId; @@ -61,12 +51,12 @@ public void execute(SCMSecurityProtocol client) throws IOException { "Certificate can't be found"); // Print container report info. 
- LOG.info("Certificate id: {}", serialId); + System.out.printf("Certificate id: %s%n", serialId); try { X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr); - LOG.info(cert.toString()); + System.out.println(cert); } catch (CertificateException ex) { - LOG.error("Failed to get certificate id " + serialId); + System.err.println("Failed to get certificate id " + serialId); throw new IOException("Fail to get certificate id " + serialId, ex); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java index c2e0bd7fadff..ea0898381478 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ListSubcommand.java @@ -36,8 +36,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.server.JsonUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -54,9 +52,6 @@ versionProvider = HddsVersionProvider.class) public class ListSubcommand extends ScmCertSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - @Option(names = {"-s", "--start"}, description = "Certificate serial id to start the iteration", defaultValue = "0", showDefaultValue = Visibility.ALWAYS) @@ -114,7 +109,7 @@ protected void execute(SCMSecurityProtocol client) throws IOException { CertificateCodec.getX509Certificate(certPemStr); certList.add(new Certificate(cert)); } catch (CertificateException ex) { - LOG.error("Failed to parse certificate."); + err.println("Failed to parse certificate."); } } System.out.println( @@ -122,9 +117,9 @@ protected void execute(SCMSecurityProtocol client) throws IOException { return; } - LOG.info("Certificate list:(Type={}, BatchSize={}, CertCount={})", + System.out.printf("Certificate list:(Type=%s, BatchSize=%s, CertCount=%s)%n", type.toUpperCase(), count, certPemList.size()); - printCertList(LOG, certPemList); + printCertList(certPemList); } private static class BigIntJsonSerializer extends JsonSerializer { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java index d7ebb44e0ffc..354adbb5d6ba 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/ScmCertSubcommand.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.scm.cli.ScmOption; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.slf4j.Logger; import picocli.CommandLine; import java.io.IOException; @@ -37,29 +36,29 @@ public abstract class ScmCertSubcommand implements Callable { @CommandLine.Mixin private ScmOption scmOption; - private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s"; + private static final String OUTPUT_FORMAT = "%-17s %-30s %-30s %-110s %-110s%n"; - protected void printCertList(Logger log, List pemEncodedCerts) { + protected void printCertList(List pemEncodedCerts) { if (pemEncodedCerts.isEmpty()) { 
- log.info("No certificates to list"); + System.out.println("No certificates to list"); return; } - log.info(String.format(OUTPUT_FORMAT, "SerialNumber", "Valid From", - "Expiry", "Subject", "Issuer")); + System.out.printf(OUTPUT_FORMAT, "SerialNumber", "Valid From", + "Expiry", "Subject", "Issuer"); for (String certPemStr : pemEncodedCerts) { try { X509Certificate cert = CertificateCodec.getX509Certificate(certPemStr); - printCert(cert, log); + printCert(cert); } catch (CertificateException e) { - log.error("Failed to parse certificate.", e); + System.err.println("Failed to parse certificate: " + e.getMessage()); } } } - protected void printCert(X509Certificate cert, Logger log) { - log.info(String.format(OUTPUT_FORMAT, cert.getSerialNumber(), + protected void printCert(X509Certificate cert) { + System.out.printf(OUTPUT_FORMAT, cert.getSerialNumber(), cert.getNotBefore(), cert.getNotAfter(), cert.getSubjectDN(), - cert.getIssuerDN())); + cert.getIssuerDN()); } protected abstract void execute(SCMSecurityProtocol client) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java index 9eedbf858958..313dc64c9fc9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateSubcommand.java @@ -25,8 +25,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -40,9 +38,6 @@ versionProvider = HddsVersionProvider.class) public class CreateSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(CreateSubcommand.class); - @Option(description = "Owner of the new container", defaultValue = "OZONE", names = { "-o", "--owner"}) private String owner; @@ -50,7 +45,7 @@ public class CreateSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { ContainerWithPipeline container = scmClient.createContainer(owner); - LOG.info("Container {} is created.", + System.out.printf("Container %s is created.%n", container.getContainerInfo().getContainerID()); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 8ed9f520b29d..0e67661bba1d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -45,8 +45,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.server.JsonUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Model.CommandSpec; @@ -63,9 +61,6 @@ versionProvider = HddsVersionProvider.class) public class InfoSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(InfoSubcommand.class); - @Spec private CommandSpec spec; @@ -126,13 +121,13 @@ private void printOutput(ScmClient scmClient, String id, boolean first) private void printHeader() { 
if (json && multiContainer) { - LOG.info("["); + System.out.println("["); } } private void printFooter() { if (json && multiContainer) { - LOG.info("]"); + System.out.println("]"); } } @@ -142,9 +137,9 @@ private void printError(String error) { private void printBreak() { if (json) { - LOG.info(","); + System.out.println(","); } else { - LOG.info(""); + System.out.println(""); } } @@ -175,47 +170,47 @@ private void printDetails(ScmClient scmClient, long containerID, new ContainerWithPipelineAndReplicas(container.getContainerInfo(), container.getPipeline(), replicas, container.getContainerInfo().getPipelineID()); - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } else { ContainerWithoutDatanodes wrapper = new ContainerWithoutDatanodes(container.getContainerInfo(), container.getPipeline(), replicas, container.getContainerInfo().getPipelineID()); - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } } else { // Print container report info. - LOG.info("Container id: {}", containerID); + System.out.printf("Container id: %s%n", containerID); boolean verbose = spec != null && spec.root().userObject() instanceof GenericParentCommand && ((GenericParentCommand) spec.root().userObject()).isVerbose(); if (verbose) { - LOG.info("Pipeline Info: {}", container.getPipeline()); + System.out.printf("Pipeline Info: %s%n", container.getPipeline()); } else { - LOG.info("Pipeline id: {}", container.getPipeline().getId().getId()); + System.out.printf("Pipeline id: %s%n", container.getPipeline().getId().getId()); } - LOG.info("Write PipelineId: {}", + System.out.printf("Write PipelineId: %s%n", container.getContainerInfo().getPipelineID().getId()); try { String pipelineState = scmClient.getPipeline( container.getContainerInfo().getPipelineID().getProtobuf()) .getPipelineState().toString(); - LOG.info("Write Pipeline State: {}", pipelineState); + System.out.printf("Write Pipeline State: %s%n", pipelineState); } catch (IOException ioe) { if (SCMHAUtils.unwrapException( ioe) instanceof PipelineNotFoundException) { - LOG.info("Write Pipeline State: CLOSED"); + System.out.println("Write Pipeline State: CLOSED"); } else { printError("Failed to retrieve pipeline info"); } } - LOG.info("Container State: {}", container.getContainerInfo().getState()); + System.out.printf("Container State: %s%n", container.getContainerInfo().getState()); // Print pipeline of an existing container. 
String machinesStr = container.getPipeline().getNodes().stream().map( InfoSubcommand::buildDatanodeDetails) .collect(Collectors.joining(",\n")); - LOG.info("Datanodes: [{}]", machinesStr); + System.out.printf("Datanodes: [%s]%n", machinesStr); // Print the replica details if available if (replicas != null) { @@ -223,7 +218,7 @@ private void printDetails(ScmClient scmClient, long containerID, .sorted(Comparator.comparing(ContainerReplicaInfo::getReplicaIndex)) .map(InfoSubcommand::buildReplicaDetails) .collect(Collectors.joining(",\n")); - LOG.info("Replicas: [{}]", replicaStr); + System.out.printf("Replicas: [%s]%n", replicaStr); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index b120fe4169da..ecc43d04087a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -36,8 +36,6 @@ import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -52,9 +50,6 @@ versionProvider = HddsVersionProvider.class) public class ListSubcommand extends ScmSubcommand { - private static final Logger LOG = - LoggerFactory.getLogger(ListSubcommand.class); - @Option(names = {"-s", "--start"}, description = "Container id to start the iteration") private long startId; @@ -94,7 +89,7 @@ public class ListSubcommand extends ScmSubcommand { private void outputContainerInfo(ContainerInfo containerInfo) throws IOException { // Print container report info. 
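Because these subcommands now write to System.out instead of a logger, the TestInfoSubCommand changes that follow drop the log4j TestAppender and assert against captured stdout instead. A minimal sketch of that capture pattern, assuming JUnit 5; the class name and the printed line are illustrative stand-ins for running the command under test:

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import java.nio.charset.StandardCharsets;
    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class StdoutCaptureSketch {
      private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
      private final PrintStream originalOut = System.out;

      @BeforeEach
      void setup() throws Exception {
        System.setOut(new PrintStream(outContent, false, StandardCharsets.UTF_8.name()));
      }

      @AfterEach
      void after() {
        System.setOut(originalOut);
      }

      @Test
      void outputContainsContainerId() throws Exception {
        // Stands in for cmd.execute(scmClient) in the real test.
        System.out.printf("Container id: %s%n", 1);
        System.out.flush();
        String output = outContent.toString(StandardCharsets.UTF_8.name());
        assertTrue(output.contains("Container id: 1"));
      }
    }
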
- LOG.info("{}", WRITER.writeValueAsString(containerInfo)); + System.out.println(WRITER.writeValueAsString(containerInfo)); } @Override diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java index d8c1addb78e0..efc11d550f55 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java @@ -28,9 +28,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.log4j.AppenderSkeleton; -import org.apache.log4j.Logger; -import org.apache.log4j.spi.LoggingEvent; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -44,6 +41,7 @@ import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.UUID; import java.util.regex.Matcher; @@ -52,6 +50,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -69,8 +68,6 @@ public class TestInfoSubCommand { private ScmClient scmClient; private InfoSubcommand cmd; private List datanodes; - private Logger logger; - private TestAppender appender; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); @@ -88,18 +85,12 @@ public void setup() throws IOException { when(scmClient.getContainerWithPipeline(anyLong())).then(i -> getContainerWithPipeline(i.getArgument(0))); when(scmClient.getPipeline(any())).thenThrow(new PipelineNotFoundException("Pipeline not found.")); - appender = new TestAppender(); - logger = Logger.getLogger( - org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand.class); - logger.addAppender(appender); - System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @AfterEach public void after() { - logger.removeAppender(appender); System.setOut(originalOut); System.setErr(originalErr); System.setIn(originalIn); @@ -150,10 +141,8 @@ public void testContainersCanBeReadFromStdin() throws IOException { private void validateMultiOutput() throws UnsupportedEncodingException { // Ensure we have a log line for each containerID - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage() - .matches("(?s)^Container id: (1|123|456|789).*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^Container id: (1|123|456|789).*")) .collect(Collectors.toList()); assertEquals(4, replica.size()); @@ -191,10 +180,8 @@ public void testMultipleContainersCanBePassedJson() throws Exception { private void validateJsonMultiOutput() throws UnsupportedEncodingException { // Ensure we have a log line for each 
containerID - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage() - .matches("(?s)^.*\"containerInfo\".*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^.*\"containerInfo\".*")) .collect(Collectors.toList()); assertEquals(4, replica.size()); @@ -213,34 +200,33 @@ private void testReplicaIncludedInOutput(boolean includeIndex) cmd.execute(scmClient); // Ensure we have a line for Replicas: - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) - .collect(Collectors.toList()); - assertEquals(1, replica.size()); + String output = outContent.toString(DEFAULT_ENCODING); + Pattern pattern = Pattern.compile("Replicas: \\[.*\\]", Pattern.DOTALL); + Matcher matcher = pattern.matcher(output); + assertTrue(matcher.find()); + String replica = matcher.group(); // Ensure each DN UUID is mentioned in the message: for (DatanodeDetails dn : datanodes) { - Pattern pattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", + Pattern uuidPattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", Pattern.DOTALL); - Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage()); - assertTrue(matcher.matches()); + assertThat(replica).matches(uuidPattern); } // Ensure the replicaIndex output is in order if (includeIndex) { List indexList = new ArrayList<>(); for (int i = 1; i < datanodes.size() + 1; i++) { String temp = "ReplicaIndex: " + i; - indexList.add(replica.get(0).getRenderedMessage().indexOf(temp)); + indexList.add(replica.indexOf(temp)); } assertEquals(datanodes.size(), indexList.size()); assertTrue(inSort(indexList)); } // Ensure ReplicaIndex is not mentioned as it was not passed in the proto: - Pattern pattern = Pattern.compile(".*ReplicaIndex.*", - Pattern.DOTALL); - Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage()); - assertEquals(includeIndex, matcher.matches()); + assertEquals(includeIndex, + Pattern.compile(".*ReplicaIndex.*", Pattern.DOTALL) + .matcher(replica) + .matches()); } @Test @@ -253,9 +239,8 @@ public void testReplicasNotOutputIfError() throws IOException { cmd.execute(scmClient); // Ensure we have no lines for Replicas: - List logs = appender.getLog(); - List replica = logs.stream() - .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) + List replica = Arrays.stream(outContent.toString(DEFAULT_ENCODING).split("\n")) + .filter(m -> m.matches("(?s)^Replicas:.*")) .collect(Collectors.toList()); assertEquals(0, replica.size()); @@ -274,9 +259,7 @@ public void testReplicasNotOutputIfErrorWithJson() throws IOException { c.parseArgs("1", "--json"); cmd.execute(scmClient); - List logs = appender.getLog(); - assertEquals(1, logs.size()); - String json = logs.get(0).getRenderedMessage(); + String json = outContent.toString(DEFAULT_ENCODING); assertFalse(json.matches("(?s).*replicas.*")); } @@ -310,11 +293,8 @@ private void testJsonOutput() throws IOException { c.parseArgs("1", "--json"); cmd.execute(scmClient); - List logs = appender.getLog(); - assertEquals(1, logs.size()); - // Ensure each DN UUID is mentioned in the message after replicas: - String json = logs.get(0).getRenderedMessage(); + String json = outContent.toString(DEFAULT_ENCODING); assertTrue(json.matches("(?s).*replicas.*")); for (DatanodeDetails dn : datanodes) { Pattern pattern = Pattern.compile( @@ -409,25 +389,4 @@ private List createDatanodeDetails(int count) { return dns; } - private 
static class TestAppender extends AppenderSkeleton { - private final List log = new ArrayList<>(); - - @Override - public boolean requiresLayout() { - return false; - } - - @Override - protected void append(final LoggingEvent loggingEvent) { - log.add(loggingEvent); - } - - @Override - public void close() { - } - - public List getLog() { - return new ArrayList<>(log); - } - } } From c325315cae49b88e3e1c707b1d8f23e8c94db502 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 13:12:04 +0100 Subject: [PATCH 052/108] HDDS-10418. Bump commons-io to 2.15.1 (#6266) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 84fe041db915..5e2f49336e86 100644 --- a/pom.xml +++ b/pom.xml @@ -120,7 +120,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.8.0 1.5.2-5 1.0.13 - 2.11.0 + 2.15.1 3.14.0 1.2 1.1 From decacde8fc7468f5b376382d7f6a530036bbcec7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 14:41:41 +0100 Subject: [PATCH 053/108] HDDS-10419. Bump maven-gpg-plugin to 3.1.0 (#6211) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 5e2f49336e86..c76df71514ca 100644 --- a/pom.xml +++ b/pom.xml @@ -94,7 +94,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs UTF-8 UTF-8 - 1.5 + 3.1.0 bash From f62a8e33f34ffc1b7a46f862948ab348a1dd4aab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 16:31:20 +0100 Subject: [PATCH 054/108] HDDS-10420. Bump restrict-imports-enforcer-rule to 2.5.0 (#6264) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index c76df71514ca..1f8af9f6b677 100644 --- a/pom.xml +++ b/pom.xml @@ -296,7 +296,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.9.7 1.14.0 - 2.4.0 + 2.5.0 1.0.1 5.3.27 From 0cd6b3bf8a230b8fab71be7ecd802ef36cd7b55f Mon Sep 17 00:00:00 2001 From: SaketaChalamchala Date: Sun, 25 Feb 2024 00:18:57 -0800 Subject: [PATCH 055/108] HDDS-10399. 
IndexOutOfBoundsException when shallow listing empty directory in non-FSO bucket (#6259) --- .../hadoop/ozone/client/OzoneBucket.java | 2 +- .../apache/hadoop/ozone/om/TestListKeys.java | 114 +++++++++++++++--- 2 files changed, 95 insertions(+), 21 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index dc7d6cf0a717..3b76daeba4e5 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -1253,7 +1253,7 @@ List getNextShallowListOfKeys(String prevKey) proxy.listStatusLight(volumeName, name, delimiterKeyPrefix, false, startKey, listCacheSize, false); - if (addedKeyPrefix) { + if (addedKeyPrefix && statuses.size() > 0) { // previous round already include the startKey, so remove it statuses.remove(0); } else { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java index be972557f4a4..204c0ee66818 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeys.java @@ -63,6 +63,8 @@ public class TestListKeys { private static OzoneConfiguration conf; private static OzoneBucket legacyOzoneBucket; + + private static OzoneBucket obsOzoneBucket; private static OzoneClient client; /** @@ -86,6 +88,10 @@ public static void init() throws Exception { legacyOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.LEGACY); + // create a volume and a OBJECT_STORE bucket + obsOzoneBucket = TestDataUtil + .createVolumeAndBucket(client, BucketLayout.OBJECT_STORE); + initFSNameSpace(); } @@ -99,6 +105,7 @@ public static void teardownClass() { private static void initFSNameSpace() throws Exception { buildNameSpaceTree(legacyOzoneBucket); + buildNameSpaceTree(obsOzoneBucket); } /** @@ -108,9 +115,9 @@ private static void initFSNameSpace() throws Exception { * | * a1 * | - * ----------------------------------- - * | | | - * b1 b2 b3 + * -------------------------------------------------------- + * | | | | + * b1 b2 b3 b4 * ------- --------- ----------- * | | | | | | | | * c1 c2 d1 d2 d3 e1 e2 e3 @@ -125,25 +132,27 @@ private static void initFSNameSpace() throws Exception { private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { LinkedList keys = new LinkedList<>(); - keys.add("/a1/b1/c1111.tx"); - keys.add("/a1/b1/c1222.tx"); - keys.add("/a1/b1/c1333.tx"); - keys.add("/a1/b1/c1444.tx"); - keys.add("/a1/b1/c1555.tx"); - keys.add("/a1/b1/c1/c1.tx"); - keys.add("/a1/b1/c12/c2.tx"); - keys.add("/a1/b1/c12/c3.tx"); - - keys.add("/a1/b2/d1/d11.tx"); - keys.add("/a1/b2/d2/d21.tx"); - keys.add("/a1/b2/d2/d22.tx"); - keys.add("/a1/b2/d3/d31.tx"); - - keys.add("/a1/b3/e1/e11.tx"); - keys.add("/a1/b3/e2/e21.tx"); - keys.add("/a1/b3/e3/e31.tx"); + keys.add("a1/b1/c1111.tx"); + keys.add("a1/b1/c1222.tx"); + keys.add("a1/b1/c1333.tx"); + keys.add("a1/b1/c1444.tx"); + keys.add("a1/b1/c1555.tx"); + keys.add("a1/b1/c1/c1.tx"); + keys.add("a1/b1/c12/c2.tx"); + keys.add("a1/b1/c12/c3.tx"); + + keys.add("a1/b2/d1/d11.tx"); + keys.add("a1/b2/d2/d21.tx"); + keys.add("a1/b2/d2/d22.tx"); + keys.add("a1/b2/d3/d31.tx"); + + keys.add("a1/b3/e1/e11.tx"); + 
keys.add("a1/b3/e2/e21.tx"); + keys.add("a1/b3/e3/e31.tx"); createKeys(ozoneBucket, keys); + + ozoneBucket.createDirectory("a1/b4/"); } private static Stream shallowListDataWithTrailingSlash() { @@ -186,6 +195,58 @@ private static Stream shallowListDataWithTrailingSlash() { "a1/b1/c1333.tx", "a1/b1/c1444.tx", "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey is empty, return key that is same as keyPrefix. + of("a1/b4/", "", newLinkedList(Arrays.asList( + "a1/b4/" + ))) + ); + } + + private static Stream shallowListObsDataWithTrailingSlash() { + return Stream.of( + + // Case-1: StartKey is less than prefixKey, return emptyList. + of("a1/b2/", "a1", newLinkedList(Collections.emptyList())), + + // Case-2: StartKey is empty, return all immediate node. + of("a1/b2/", "", newLinkedList(Arrays.asList( + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-3: StartKey is same as prefixKey, return all immediate nodes. + of("a1/b2/", "a1/b2", newLinkedList(Arrays.asList( + "a1/b2/d1/", + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-4: StartKey is greater than prefixKey + of("a1/b2/", "a1/b2/d2/d21.tx", newLinkedList(Arrays.asList( + "a1/b2/d2/", + "a1/b2/d3/" + ))), + + // Case-5: StartKey reaches last element, return emptyList + of("a1/b2/", "a1/b2/d3/d31.tx", newLinkedList( + Collections.emptyList() + )), + + // Case-6: Mix result + of("a1/b1/", "a1/b1/c12", newLinkedList(Arrays.asList( + "a1/b1/c12/", + "a1/b1/c1222.tx", + "a1/b1/c1333.tx", + "a1/b1/c1444.tx", + "a1/b1/c1555.tx" + ))), + + // Case-7: StartKey is empty, return key that is same as keyPrefix. + of("a1/b4/", "", newLinkedList(Arrays.asList( + "a1/b4/" ))) ); } @@ -252,6 +313,11 @@ private static Stream shallowListDataWithoutTrailingSlash() { of("a1/b1/c12", "", newLinkedList(Arrays.asList( "a1/b1/c12/", "a1/b1/c1222.tx" + ))), + + // Case-10: + of("a1/b4", "", newLinkedList(Arrays.asList( + "a1/b4/" ))) ); @@ -264,11 +330,19 @@ public void testShallowListKeysWithPrefixTrailingSlash(String keyPrefix, checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); } + @ParameterizedTest + @MethodSource("shallowListObsDataWithTrailingSlash") + public void testShallowListObsKeysWithPrefixTrailingSlash(String keyPrefix, + String startKey, List expectedKeys) throws Exception { + checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket); + } + @ParameterizedTest @MethodSource("shallowListDataWithoutTrailingSlash") public void testShallowListKeysWithoutPrefixTrailingSlash(String keyPrefix, String startKey, List expectedKeys) throws Exception { checkKeyShallowList(keyPrefix, startKey, expectedKeys, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, obsOzoneBucket); } private void checkKeyShallowList(String keyPrefix, String startKey, From dc9bd61914ae7e1f6ee774b3b01b6bb478a80e44 Mon Sep 17 00:00:00 2001 From: david1859168 <71422636+david1859168@users.noreply.github.com> Date: Sun, 25 Feb 2024 20:38:13 +1100 Subject: [PATCH 056/108] HDDS-10365. 
Fix description for `ozone getconf ozonemanagers` (#6263) --- .../apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java index e8ced23b348f..f66f4f3abda2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java @@ -33,8 +33,7 @@ */ @Command(name = "ozonemanagers", aliases = {"-ozonemanagers"}, - description = "gets list of ozone storage container " - + "manager nodes in the cluster", + description = "gets list of Ozone Manager nodes in the cluster", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) public class OzoneManagersCommandHandler implements Callable { From df68290e72251c569afe7771ff4d9ef6284d6065 Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Sun, 25 Feb 2024 23:16:16 +0800 Subject: [PATCH 057/108] HDDS-10214. Update supported versions in security policy up to 1.4.0 (#6100) --- SECURITY.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/SECURITY.md b/SECURITY.md index 2f92dd685c12..3a89968026a2 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -5,13 +5,16 @@ The first stable release of Apache Ozone is 1.0, the previous alpha and beta releases are not supported by the community. | Version | Supported | -| ------------- | ------------------ | +|---------------| ------------------ | | 0.3.0 (alpha) | :x: | | 0.4.0 (alpha) | :x: | | 0.4.1 (alpha) | :x: | | 0.5.0 (beta) | :x: | -| 1.0 | :white_check_mark: | -| 1.1 | :white_check_mark: | +| 1.0.0 | :x: | +| 1.1.0 | :x: | +| 1.2.1 | :x: | +| 1.3.0 | :x: | +| 1.4.0 | :white_check_mark: | ## Reporting a Vulnerability From 84c6e4d861d907d1ac39e252aa97e8a512ef247b Mon Sep 17 00:00:00 2001 From: XiChen <32928346+xichen01@users.noreply.github.com> Date: Mon, 26 Feb 2024 15:37:28 +0800 Subject: [PATCH 058/108] HDDS-10384. RPC client Reusing thread resources. 
(#6270) --- .../hdds/scm/storage/AbstractCommitWatcher.java | 2 +- .../hadoop/hdds/scm/storage/BlockOutputStream.java | 5 +---- .../reconstruction/ECReconstructionCoordinator.java | 3 +-- .../hadoop/ozone/client/io/ECKeyOutputStream.java | 11 ++++------- .../apache/hadoop/ozone/client/rpc/RpcClient.java | 3 +-- .../ozone/client/rpc/TestOzoneAtRestEncryption.java | 12 +++++++++--- 6 files changed, 17 insertions(+), 19 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 0c5501c7922c..957f761ccbc2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -73,7 +73,7 @@ SortedMap> getCommitIndexMap() { return commitIndexMap; } - void updateCommitInfoMap(long index, List buffers) { + synchronized void updateCommitInfoMap(long index, List buffers) { commitIndexMap.computeIfAbsent(index, k -> new LinkedList<>()) .addAll(buffers); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 5ff5da60989e..5c0516d7bd4f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -25,7 +25,6 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; @@ -182,8 +181,7 @@ public BlockOutputStream( (long) flushPeriod * streamBufferArgs.getStreamBufferSize() == streamBufferArgs .getStreamBufferFlushSize()); - // A single thread executor handle the responses of async requests - responseExecutor = Executors.newSingleThreadExecutor(); + this.responseExecutor = blockOutputStreamResourceProvider.get(); bufferList = null; totalDataFlushedLength = 0; writtenDataLength = 0; @@ -657,7 +655,6 @@ public void cleanup(boolean invalidateClient) { bufferList.clear(); } bufferList = null; - responseExecutor.shutdown(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index a45c15844847..90756bbc8898 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -101,8 +101,7 @@ public class ECReconstructionCoordinator implements Closeable { private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; - // TODO: Adjusts to the appropriate value when the ec-reconstruct-writer thread pool is used. 
- private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 0; + private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 5; private final ECContainerOperationClient containerOperationClient; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java index 878558073f75..0cb3973e0411 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java @@ -43,8 +43,6 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -66,7 +64,6 @@ public final class ECKeyOutputStream extends KeyOutputStream private final int numParityBlks; private final ByteBufferPool bufferPool; private final RawErasureEncoder encoder; - private final ExecutorService flushExecutor; private final Future flushFuture; private final AtomicLong flushCheckpoint; @@ -119,12 +116,13 @@ private ECKeyOutputStream(Builder builder) { this.writeOffset = 0; this.encoder = CodecUtil.createRawEncoderWithFallback( builder.getReplicationConfig()); - this.flushExecutor = Executors.newSingleThreadExecutor(); S3Auth s3Auth = builder.getS3CredentialsProvider().get(); ThreadLocal s3CredentialsProvider = builder.getS3CredentialsProvider(); - flushExecutor.submit(() -> s3CredentialsProvider.set(s3Auth)); - this.flushFuture = this.flushExecutor.submit(this::flushStripeFromQueue); + this.flushFuture = builder.getExecutorServiceSupplier().get().submit(() -> { + s3CredentialsProvider.set(s3Auth); + return flushStripeFromQueue(); + }); this.flushCheckpoint = new AtomicLong(0); this.atomicKeyCreation = builder.getAtomicKeyCreation(); } @@ -495,7 +493,6 @@ public void close() throws IOException { } catch (InterruptedException e) { throw new IOException("Flushing thread was interrupted", e); } finally { - flushExecutor.shutdownNow(); closeCurrentStreamEntry(); blockOutputStreamEntryPool.cleanup(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 74b22e7ca4c6..a6830ba9f771 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -196,8 +196,7 @@ public class RpcClient implements ClientProtocol { // for reconstruction. private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; - // TODO: Adjusts to the appropriate value when the writeThreadPool is used. 
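The BlockOutputStream and ECKeyOutputStream hunks above stop creating a single-thread executor per stream (and stop shutting it down in cleanup), and instead obtain an executor from a supplier owned by the client, so the same threads are reused across streams. A minimal sketch of that ownership model with illustrative names; it is not the actual Ozone client wiring:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Supplier;

    public class SharedExecutorSketch {
      // Owned by the long-lived client; sized once and reused by every stream.
      private final ExecutorService writeExecutor = Executors.newFixedThreadPool(4);

      final class BorrowingStream {
        private final ExecutorService responseExecutor;

        BorrowingStream(Supplier<ExecutorService> executorSupplier) {
          // Borrowed, not owned: the stream must not shut this down in its own cleanup.
          this.responseExecutor = executorSupplier.get();
        }

        void handleResponseAsync(Runnable responseHandler) {
          responseExecutor.submit(responseHandler);
        }
      }

      BorrowingStream newStream() {
        return new BorrowingStream(() -> writeExecutor);
      }

      void close() {
        // Only the owner shuts the shared pool down, and only once.
        writeExecutor.shutdown();
      }

      public static void main(String[] args) {
        SharedExecutorSketch client = new SharedExecutorSketch();
        client.newStream().handleResponseAsync(() -> System.out.println("response handled"));
        client.close();
      }
    }
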
- private static final int WRITE_POOL_MIN_SIZE = 0; + private static final int WRITE_POOL_MIN_SIZE = 1; private final ConfigurationSource conf; private final OzoneManagerClientProtocol ozoneManagerClient; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 29cf1bc5e117..44303ed2ff23 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -213,6 +213,14 @@ static void shutdown() throws IOException { } } + static void reInitClient() throws IOException { + ozClient = OzoneClientFactory.getRpcClient(conf); + store = ozClient.getObjectStore(); + TestOzoneRpcClient.setOzClient(ozClient); + TestOzoneRpcClient.setStore(store); + } + + @ParameterizedTest @EnumSource void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { @@ -770,9 +778,7 @@ void testGetKeyProvider() throws Exception { KeyProvider kp3 = ozClient.getObjectStore().getKeyProvider(); assertNotEquals(kp3, kpSpy); - // Restore ozClient and store - TestOzoneRpcClient.setOzClient(OzoneClientFactory.getRpcClient(conf)); - TestOzoneRpcClient.setStore(ozClient.getObjectStore()); + reInitClient(); } private static RepeatedOmKeyInfo getMatchedKeyInfo( From 50d43e8ae4adb988cdd7c4161442b8eab7969ef7 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 26 Feb 2024 09:02:15 +0100 Subject: [PATCH 059/108] HDDS-10394. Fix parameter number warning in om.helpers (#6271) --- .../ozone/om/helpers/BasicOmKeyInfo.java | 67 ++++----- .../hadoop/ozone/om/helpers/OmBucketArgs.java | 142 +++++++----------- .../hadoop/ozone/om/helpers/OmBucketInfo.java | 95 ++++-------- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 71 ++++----- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 78 +++------- .../ozone/om/helpers/OmMultipartKeyInfo.java | 69 ++++----- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 118 +++++---------- 7 files changed, 236 insertions(+), 404 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java index 9c9a5027774f..044cc17f5e57 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/BasicOmKeyInfo.java @@ -28,29 +28,37 @@ /** * Lightweight OmKeyInfo class. 
*/ -public class BasicOmKeyInfo { - - private String volumeName; - private String bucketName; - private String keyName; - private long dataSize; - private long creationTime; - private long modificationTime; - private ReplicationConfig replicationConfig; - private boolean isFile; - - @SuppressWarnings("parameternumber") - public BasicOmKeyInfo(String volumeName, String bucketName, String keyName, - long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, boolean isFile) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationConfig = replicationConfig; - this.isFile = isFile; +public final class BasicOmKeyInfo { + + private final String volumeName; + private final String bucketName; + private final String keyName; + private final long dataSize; + private final long creationTime; + private final long modificationTime; + private final ReplicationConfig replicationConfig; + private final boolean isFile; + + private BasicOmKeyInfo(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.replicationConfig = b.replicationConfig; + this.isFile = b.isFile; + } + + private BasicOmKeyInfo(OmKeyInfo b) { + this.volumeName = b.getVolumeName(); + this.bucketName = b.getBucketName(); + this.keyName = b.getKeyName(); + this.dataSize = b.getDataSize(); + this.creationTime = b.getCreationTime(); + this.modificationTime = b.getModificationTime(); + this.replicationConfig = b.getReplicationConfig(); + this.isFile = b.isFile(); } public String getVolumeName() { @@ -139,8 +147,7 @@ public Builder setIsFile(boolean isFile) { } public BasicOmKeyInfo build() { - return new BasicOmKeyInfo(volumeName, bucketName, keyName, dataSize, - creationTime, modificationTime, replicationConfig, isFile); + return new BasicOmKeyInfo(this); } } @@ -233,14 +240,6 @@ public int hashCode() { } public static BasicOmKeyInfo fromOmKeyInfo(OmKeyInfo omKeyInfo) { - return new BasicOmKeyInfo( - omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), - omKeyInfo.getKeyName(), - omKeyInfo.getDataSize(), - omKeyInfo.getCreationTime(), - omKeyInfo.getModificationTime(), - omKeyInfo.getReplicationConfig(), - omKeyInfo.isFile()); + return new BasicOmKeyInfo(omKeyInfo); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 55d05dccd755..34e93c1674af 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.om.helpers; +import java.util.HashMap; import java.util.LinkedHashMap; import java.util.Map; @@ -44,44 +45,40 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { /** * Bucket Version flag. */ - private Boolean isVersionEnabled; + private final Boolean isVersionEnabled; /** * Type of storage to be used for this bucket. * [RAM_DISK, SSD, DISK, ARCHIVE] */ - private StorageType storageType; + private final StorageType storageType; /** * Bucket encryption key info if encryption is enabled. 
*/ - private BucketEncryptionKeyInfo bekInfo; - private long quotaInBytes = OzoneConsts.QUOTA_RESET; - private long quotaInNamespace = OzoneConsts.QUOTA_RESET; - private boolean quotaInBytesSet = false; - private boolean quotaInNamespaceSet = false; - private DefaultReplicationConfig defaultReplicationConfig = null; + private final BucketEncryptionKeyInfo bekInfo; + private final long quotaInBytes; + private final long quotaInNamespace; + private final boolean quotaInBytesSet; + private final boolean quotaInNamespaceSet; + private final DefaultReplicationConfig defaultReplicationConfig; /** * Bucket Owner Name. */ - private String ownerName; - - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketArgs(String volumeName, String bucketName, - Boolean isVersionEnabled, StorageType storageType, - Map metadata, String ownerName) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - setMetadata(metadata); - this.ownerName = ownerName; + private final String ownerName; + + private OmBucketArgs(Builder b) { + setMetadata(b.metadata); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.isVersionEnabled = b.isVersionEnabled; + this.storageType = b.storageType; + this.ownerName = b.ownerName; + this.defaultReplicationConfig = b.defaultReplicationConfig; + this.quotaInBytesSet = b.quotaInBytesSet; + this.quotaInBytes = quotaInBytesSet ? b.quotaInBytes : OzoneConsts.QUOTA_RESET; + this.quotaInNamespaceSet = b.quotaInNamespaceSet; + this.quotaInNamespace = quotaInNamespaceSet ? b.quotaInNamespace : OzoneConsts.QUOTA_RESET; + this.bekInfo = b.bekInfo; } /** @@ -149,7 +146,6 @@ public boolean hasQuotaInNamespace() { /** * Returns Bucket default replication config. - * @return */ public DefaultReplicationConfig getDefaultReplicationConfig() { return defaultReplicationConfig; @@ -159,30 +155,6 @@ public BucketEncryptionKeyInfo getBucketEncryptionKeyInfo() { return bekInfo; } - /** - * Sets the Bucket default replication config. - */ - private void setDefaultReplicationConfig( - DefaultReplicationConfig defaultReplicationConfig) { - this.defaultReplicationConfig = defaultReplicationConfig; - } - - private void setQuotaInBytes(long quotaInBytes) { - this.quotaInBytesSet = true; - this.quotaInBytes = quotaInBytes; - } - - private void setQuotaInNamespace(long quotaInNamespace) { - this.quotaInNamespaceSet = true; - this.quotaInNamespace = quotaInNamespace; - } - - @Deprecated - private void setBucketEncryptionKey( - BucketEncryptionKeyInfo bucketEncryptionKey) { - this.bekInfo = bucketEncryptionKey; - } - /** * Returns Bucket Owner Name. 
* @@ -226,7 +198,7 @@ public static class Builder { private String bucketName; private Boolean isVersionEnabled; private StorageType storageType; - private Map metadata; + private final Map metadata = new HashMap<>(); private boolean quotaInBytesSet = false; private long quotaInBytes; private boolean quotaInNamespaceSet = false; @@ -259,12 +231,14 @@ public Builder setIsVersionEnabled(Boolean versionFlag) { @Deprecated public Builder setBucketEncryptionKey(BucketEncryptionKeyInfo info) { - this.bekInfo = info; + if (info == null || info.getKeyName() != null) { + this.bekInfo = info; + } return this; } - public Builder addMetadata(Map metadataMap) { - this.metadata = metadataMap; + public Builder addAllMetadata(Map map) { + metadata.putAll(map); return this; } @@ -303,20 +277,7 @@ public Builder setOwnerName(String owner) { public OmBucketArgs build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); - OmBucketArgs omBucketArgs = - new OmBucketArgs(volumeName, bucketName, isVersionEnabled, - storageType, metadata, ownerName); - omBucketArgs.setDefaultReplicationConfig(defaultReplicationConfig); - if (quotaInBytesSet) { - omBucketArgs.setQuotaInBytes(quotaInBytes); - } - if (quotaInNamespaceSet) { - omBucketArgs.setQuotaInNamespace(quotaInNamespace); - } - if (bekInfo != null && bekInfo.getKeyName() != null) { - omBucketArgs.setBucketEncryptionKey(bekInfo); - } - return omBucketArgs; + return new OmBucketArgs(this); } } @@ -348,7 +309,7 @@ public BucketArgs getProtobuf() { builder.setOwnerName(ownerName); } - if (bekInfo != null && bekInfo.getKeyName() != null) { + if (bekInfo != null) { builder.setBekInfo(OMPBHelper.convert(bekInfo)); } @@ -357,39 +318,42 @@ public BucketArgs getProtobuf() { /** * Parses BucketInfo protobuf and creates OmBucketArgs. - * @param bucketArgs * @return instance of OmBucketArgs */ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { - OmBucketArgs omBucketArgs = - new OmBucketArgs(bucketArgs.getVolumeName(), - bucketArgs.getBucketName(), - bucketArgs.hasIsVersionEnabled() ? - bucketArgs.getIsVersionEnabled() : null, - bucketArgs.hasStorageType() ? StorageType.valueOf( - bucketArgs.getStorageType()) : null, - KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList()), - bucketArgs.hasOwnerName() ? - bucketArgs.getOwnerName() : null); - // OmBucketArgs ctor already has more arguments, so setting the default - // replication config separately. 
+ final OmBucketArgs.Builder builder = newBuilder() + .setVolumeName(bucketArgs.getVolumeName()) + .setBucketName(bucketArgs.getBucketName()) + .addAllMetadata(KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList())); + + if (bucketArgs.hasIsVersionEnabled()) { + builder.setIsVersionEnabled(bucketArgs.getIsVersionEnabled()); + } + if (bucketArgs.hasStorageType()) { + builder.setStorageType(StorageType.valueOf(bucketArgs.getStorageType())); + } + if (bucketArgs.hasOwnerName()) { + builder.setOwnerName(bucketArgs.getOwnerName()); + } + if (bucketArgs.hasDefaultReplicationConfig()) { - omBucketArgs.setDefaultReplicationConfig( + builder.setDefaultReplicationConfig( DefaultReplicationConfig.fromProto( bucketArgs.getDefaultReplicationConfig())); } if (bucketArgs.hasQuotaInBytes()) { - omBucketArgs.setQuotaInBytes(bucketArgs.getQuotaInBytes()); + builder.setQuotaInBytes(bucketArgs.getQuotaInBytes()); } if (bucketArgs.hasQuotaInNamespace()) { - omBucketArgs.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); + builder.setQuotaInNamespace(bucketArgs.getQuotaInNamespace()); } if (bucketArgs.hasBekInfo()) { - omBucketArgs.setBucketEncryptionKey( + builder.setBucketEncryptionKey( OMPBHelper.convert(bucketArgs.getBekInfo())); } - return omBucketArgs; + + return builder.build(); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index a1023d555c6b..9ec023cf90b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -109,68 +109,27 @@ public static Codec getCodec() { private String owner; - /** - * Private constructor, constructed via builder. - * @param volumeName - Volume name. - * @param bucketName - Bucket name. - * @param acls - list of ACLs. - * @param isVersionEnabled - Bucket version flag. - * @param storageType - Storage type to be used. - * @param creationTime - Bucket creation time. - * @param modificationTime - Bucket modification time. - * @param metadata - metadata. - * @param bekInfo - bucket encryption key info. - * @param sourceVolume - source volume for bucket links, null otherwise - * @param sourceBucket - source bucket for bucket links, null otherwise - * @param usedBytes - Bucket Quota Usage in bytes. - * @param quotaInBytes Bucket quota in bytes. - * @param quotaInNamespace Bucket quota in counts. - * @param bucketLayout bucket layout. - * @param owner owner of the bucket. - * @param defaultReplicationConfig default replication config. - * @param bucketLayout Bucket Layout. 
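The om.helpers changes here and below replace constructors that needed @SuppressWarnings("parameternumber") with a single private constructor that copies every field from the Builder. A minimal sketch of the resulting shape, with an illustrative field set rather than the full OmBucketInfo/OmKeyInfo fields:

    public final class KeyRecord {
      private final String volumeName;
      private final String bucketName;
      private final long dataSize;

      // Single private constructor: all state comes from the builder,
      // so no constructor needs a long positional parameter list.
      private KeyRecord(Builder b) {
        this.volumeName = b.volumeName;
        this.bucketName = b.bucketName;
        this.dataSize = b.dataSize;
      }

      public static Builder newBuilder() {
        return new Builder();
      }

      public static final class Builder {
        private String volumeName;
        private String bucketName;
        private long dataSize;

        public Builder setVolumeName(String volumeName) {
          this.volumeName = volumeName;
          return this;
        }

        public Builder setBucketName(String bucketName) {
          this.bucketName = bucketName;
          return this;
        }

        public Builder setDataSize(long dataSize) {
          this.dataSize = dataSize;
          return this;
        }

        public KeyRecord build() {
          return new KeyRecord(this);
        }
      }
    }

Callers keep using the existing fluent setters and build(); only the constructor arity changes, which is what clears the checkstyle ParameterNumber warning.
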
- */ - @SuppressWarnings("checkstyle:ParameterNumber") - private OmBucketInfo(String volumeName, - String bucketName, - List acls, - boolean isVersionEnabled, - StorageType storageType, - long creationTime, - long modificationTime, - long objectID, - long updateID, - Map metadata, - BucketEncryptionKeyInfo bekInfo, - String sourceVolume, - String sourceBucket, - long usedBytes, - long usedNamespace, - long quotaInBytes, - long quotaInNamespace, - BucketLayout bucketLayout, - String owner, - DefaultReplicationConfig defaultReplicationConfig) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.acls = acls; - this.isVersionEnabled = isVersionEnabled; - this.storageType = storageType; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - setObjectID(objectID); - setUpdateID(updateID); - setMetadata(metadata); - this.bekInfo = bekInfo; - this.sourceVolume = sourceVolume; - this.sourceBucket = sourceBucket; - this.usedBytes = usedBytes; - this.usedNamespace = usedNamespace; - this.quotaInBytes = quotaInBytes; - this.quotaInNamespace = quotaInNamespace; - this.bucketLayout = bucketLayout; - this.owner = owner; - this.defaultReplicationConfig = defaultReplicationConfig; + private OmBucketInfo(Builder b) { + setMetadata(b.metadata); + setObjectID(b.objectID); + setUpdateID(b.updateID); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.acls = b.acls; + this.isVersionEnabled = b.isVersionEnabled; + this.storageType = b.storageType; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.bekInfo = b.bekInfo; + this.sourceVolume = b.sourceVolume; + this.sourceBucket = b.sourceBucket; + this.usedBytes = b.usedBytes; + this.usedNamespace = b.usedNamespace; + this.quotaInBytes = b.quotaInBytes; + this.quotaInNamespace = b.quotaInNamespace; + this.bucketLayout = b.bucketLayout; + this.owner = b.owner; + this.defaultReplicationConfig = b.defaultReplicationConfig; } /** @@ -531,31 +490,37 @@ public Builder setBucketEncryptionKey( return this; } + /** @param volume - source volume for bucket links, null otherwise */ public Builder setSourceVolume(String volume) { this.sourceVolume = volume; return this; } + /** @param bucket - source bucket for bucket links, null otherwise */ public Builder setSourceBucket(String bucket) { this.sourceBucket = bucket; return this; } + /** @param quotaUsage - Bucket Quota Usage in bytes. */ public Builder setUsedBytes(long quotaUsage) { this.usedBytes = quotaUsage; return this; } + /** @param quotaUsage - Bucket Quota Usage in counts. */ public Builder setUsedNamespace(long quotaUsage) { this.usedNamespace = quotaUsage; return this; } + /** @param quota Bucket quota in bytes. */ public Builder setQuotaInBytes(long quota) { this.quotaInBytes = quota; return this; } + /** @param quota Bucket quota in counts. 
*/ public Builder setQuotaInNamespace(long quota) { this.quotaInNamespace = quota; return this; @@ -587,11 +552,7 @@ public OmBucketInfo build() { Preconditions.checkNotNull(acls); Preconditions.checkNotNull(isVersionEnabled); Preconditions.checkNotNull(storageType); - return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, - storageType, creationTime, modificationTime, objectID, updateID, - metadata, bekInfo, sourceVolume, sourceBucket, usedBytes, - usedNamespace, quotaInBytes, quotaInNamespace, bucketLayout, owner, - defaultReplicationConfig); + return new OmBucketInfo(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 453dc3b957c0..132c39c4d00e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -45,39 +45,31 @@ public final class OmKeyArgs implements Auditable { private final boolean isMultipartKey; private final String multipartUploadID; private final int multipartUploadPartNumber; - private Map metadata; - private boolean sortDatanodesInPipeline; - private List acls; - private boolean latestVersionLocation; - private boolean recursive; - private boolean headOp; - private boolean forceUpdateContainerCacheFromSCM; - - @SuppressWarnings("parameternumber") - private OmKeyArgs(String volumeName, String bucketName, String keyName, - long dataSize, ReplicationConfig replicationConfig, - List locationInfoList, boolean isMultipart, - String uploadID, int partNumber, - Map metadataMap, - List acls, boolean sortDatanode, - boolean latestVersionLocation, boolean recursive, boolean headOp, - boolean forceUpdateContainerCacheFromSCM) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.replicationConfig = replicationConfig; - this.locationInfoList = locationInfoList; - this.isMultipartKey = isMultipart; - this.multipartUploadID = uploadID; - this.multipartUploadPartNumber = partNumber; - this.metadata = metadataMap; - this.acls = acls; - this.sortDatanodesInPipeline = sortDatanode; - this.latestVersionLocation = latestVersionLocation; - this.recursive = recursive; - this.headOp = headOp; - this.forceUpdateContainerCacheFromSCM = forceUpdateContainerCacheFromSCM; + private final Map metadata; + private final boolean sortDatanodesInPipeline; + private final List acls; + private final boolean latestVersionLocation; + private final boolean recursive; + private final boolean headOp; + private final boolean forceUpdateContainerCacheFromSCM; + + private OmKeyArgs(Builder b) { + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.replicationConfig = b.replicationConfig; + this.locationInfoList = b.locationInfoList; + this.isMultipartKey = b.isMultipartKey; + this.multipartUploadID = b.multipartUploadID; + this.multipartUploadPartNumber = b.multipartUploadPartNumber; + this.metadata = b.metadata; + this.acls = b.acls; + this.sortDatanodesInPipeline = b.sortDatanodesInPipeline; + this.latestVersionLocation = b.latestVersionLocation; + this.recursive = b.recursive; + this.headOp = b.headOp; + this.forceUpdateContainerCacheFromSCM = b.forceUpdateContainerCacheFromSCM; } public boolean getIsMultipartKey() { @@ -124,10 +116,6 @@ public Map getMetadata() { return 
metadata; } - public void setMetadata(Map metadata) { - this.metadata = metadata; - } - public void setLocationInfoList(List locationInfoList) { this.locationInfoList = locationInfoList; } @@ -224,7 +212,7 @@ public static class Builder { private boolean isMultipartKey; private String multipartUploadID; private int multipartUploadPartNumber; - private Map metadata = new HashMap<>(); + private final Map metadata = new HashMap<>(); private boolean sortDatanodesInPipeline; private boolean latestVersionLocation; private List acls; @@ -326,12 +314,7 @@ public Builder setForceUpdateContainerCacheFromSCM(boolean value) { } public OmKeyArgs build() { - return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, - replicationConfig, locationInfoList, isMultipartKey, - multipartUploadID, - multipartUploadPartNumber, metadata, acls, - sortDatanodesInPipeline, latestVersionLocation, recursive, headOp, - forceUpdateContainerCacheFromSCM); + return new OmKeyArgs(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index b2297accf85e..d1fe4a324068 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -98,47 +98,26 @@ public static Codec getCodec(boolean ignorePipeline) { /** * ACL Information. */ - private List acls; - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - List versions, long dataSize, - long creationTime, long modificationTime, - ReplicationConfig replicationConfig, - Map metadata, - FileEncryptionInfo encInfo, List acls, - long objectID, long updateID, FileChecksum fileChecksum) { - this.volumeName = volumeName; - this.bucketName = bucketName; - this.keyName = keyName; - this.dataSize = dataSize; - this.keyLocationVersions = versions; - this.creationTime = creationTime; - this.modificationTime = modificationTime; - this.replicationConfig = replicationConfig; - setMetadata(metadata); - this.encInfo = encInfo; - this.acls = acls; - setObjectID(objectID); - setUpdateID(updateID); - this.fileChecksum = fileChecksum; - } - - @SuppressWarnings("parameternumber") - OmKeyInfo(String volumeName, String bucketName, String keyName, - String fileName, List versions, - long dataSize, long creationTime, long modificationTime, - ReplicationConfig replicationConfig, - Map metadata, - FileEncryptionInfo encInfo, List acls, - long parentObjectID, long objectID, long updateID, - FileChecksum fileChecksum, boolean isFile) { - this(volumeName, bucketName, keyName, versions, dataSize, - creationTime, modificationTime, replicationConfig, metadata, - encInfo, acls, objectID, updateID, fileChecksum); - this.fileName = fileName; - setParentObjectID(parentObjectID); - this.isFile = isFile; + private final List acls; + + private OmKeyInfo(Builder b) { + setMetadata(b.metadata); + setObjectID(b.objectID); + setUpdateID(b.updateID); + setParentObjectID(b.parentObjectID); + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.keyName = b.keyName; + this.dataSize = b.dataSize; + this.keyLocationVersions = b.omKeyLocationInfoGroups; + this.creationTime = b.creationTime; + this.modificationTime = b.modificationTime; + this.replicationConfig = b.replicationConfig; + this.encInfo = b.encInfo; + this.acls = b.acls; + this.fileChecksum = b.fileChecksum; + this.fileName = 
b.fileName; + this.isFile = b.isFile; } public String getVolumeName() { @@ -195,10 +174,6 @@ public void setKeyLocationVersions( this.keyLocationVersions = keyLocationVersions; } - public void updateModifcationTime() { - this.modificationTime = Time.monotonicNow(); - } - public void setFile(boolean file) { isFile = file; } @@ -443,14 +418,14 @@ public static class Builder { private String bucketName; private String keyName; private long dataSize; - private List omKeyLocationInfoGroups = + private final List omKeyLocationInfoGroups = new ArrayList<>(); private long creationTime; private long modificationTime; private ReplicationConfig replicationConfig; - private Map metadata; + private final Map metadata; private FileEncryptionInfo encInfo; - private List acls; + private final List acls; private long objectID; private long updateID; // not persisted to DB. FileName will be the last element in path keyName. @@ -462,7 +437,6 @@ public static class Builder { public Builder() { this.metadata = new HashMap<>(); - omKeyLocationInfoGroups = new ArrayList<>(); acls = new ArrayList<>(); } @@ -577,11 +551,7 @@ public Builder setFile(boolean isAFile) { } public OmKeyInfo build() { - return new OmKeyInfo( - volumeName, bucketName, keyName, fileName, - omKeyLocationInfoGroups, dataSize, creationTime, - modificationTime, replicationConfig, metadata, encInfo, acls, - parentObjectID, objectID, updateID, fileChecksum, isFile); + return new OmKeyInfo(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java index 90b6301437ca..76bbc5546bd8 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmMultipartKeyInfo.java @@ -155,37 +155,34 @@ public PartKeyInfo lastEntry() { * multiKey1 | 1026 | 1025 | * ------------------------------------------| */ - private long parentID; + private final long parentID; /** * Construct OmMultipartKeyInfo object which holds multipart upload * information for a key. */ - @SuppressWarnings("parameternumber") - private OmMultipartKeyInfo(String id, long creationTime, - ReplicationConfig replicationConfig, - PartKeyInfoMap sortedMap, long objectID, long updateID, - long parentObjId) { - this.uploadID = id; - this.creationTime = creationTime; - this.replicationConfig = replicationConfig; - this.partKeyInfoMap = sortedMap; - setObjectID(objectID); - setUpdateID(updateID); - this.parentID = parentObjId; + private OmMultipartKeyInfo(Builder b) { + this.uploadID = b.uploadID; + this.creationTime = b.creationTime; + this.replicationConfig = b.replicationConfig; + this.partKeyInfoMap = new PartKeyInfoMap(b.partKeyInfoList); + setObjectID(b.objectID); + setUpdateID(b.updateID); + this.parentID = b.parentID; } - /** - * Construct OmMultipartKeyInfo object which holds multipart upload - * information for a key. - */ - @SuppressWarnings("parameternumber") - private OmMultipartKeyInfo(String id, long creationTime, - ReplicationConfig replicationConfig, - SortedMap list, long objectID, long updateID, - long parentObjId) { - this(id, creationTime, replicationConfig, new PartKeyInfoMap(list), - objectID, updateID, parentObjId); + /** Copy constructor. 
*/ + private OmMultipartKeyInfo(OmMultipartKeyInfo b) { + this.uploadID = b.uploadID; + this.creationTime = b.creationTime; + this.replicationConfig = b.replicationConfig; + // PartKeyInfoMap is an immutable data structure. Whenever a PartKeyInfo + // is added, it returns a new shallow copy of the PartKeyInfoMap Object + // so here we can directly pass in partKeyInfoMap + this.partKeyInfoMap = b.partKeyInfoMap; + setObjectID(b.getObjectID()); + setUpdateID(b.getUpdateID()); + this.parentID = b.parentID; } /** @@ -232,7 +229,7 @@ public static class Builder { private String uploadID; private long creationTime; private ReplicationConfig replicationConfig; - private TreeMap partKeyInfoList; + private final TreeMap partKeyInfoList; private long objectID; private long updateID; private long parentID; @@ -286,8 +283,7 @@ public Builder setParentID(long parentObjId) { } public OmMultipartKeyInfo build() { - return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoList, objectID, updateID, parentID); + return new OmMultipartKeyInfo(this); } } @@ -308,10 +304,15 @@ public static OmMultipartKeyInfo getFromProto( multipartKeyInfo.getEcReplicationConfig() ); - return new OmMultipartKeyInfo(multipartKeyInfo.getUploadID(), - multipartKeyInfo.getCreationTime(), replicationConfig, - list, multipartKeyInfo.getObjectID(), - multipartKeyInfo.getUpdateID(), multipartKeyInfo.getParentID()); + return new Builder() + .setUploadID(multipartKeyInfo.getUploadID()) + .setCreationTime(multipartKeyInfo.getCreationTime()) + .setReplicationConfig(replicationConfig) + .setPartKeyInfoList(list) + .setObjectID(multipartKeyInfo.getObjectID()) + .setUpdateID(multipartKeyInfo.getUpdateID()) + .setParentID(multipartKeyInfo.getParentID()) + .build(); } /** @@ -358,11 +359,7 @@ public int hashCode() { } public OmMultipartKeyInfo copyObject() { - // PartKeyInfoMap is an immutable data structure. Whenever a PartKeyInfo - // is added, it returns a new shallow copy of the PartKeyInfoMap Object - // so here we can directly pass in partKeyInfoMap - return new OmMultipartKeyInfo(uploadID, creationTime, replicationConfig, - partKeyInfoMap, getObjectID(), getUpdateID(), parentID); + return new OmMultipartKeyInfo(this); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 56103ccb3ab8..b635ffd6d276 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -125,65 +125,26 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long exclusiveReplicatedSize; private boolean deepCleanedDeletedDir; - /** - * Private constructor, constructed via builder. - * @param snapshotId - Snapshot UUID. - * @param name - snapshot name. - * @param volumeName - volume name. - * @param bucketName - bucket name. - * @param snapshotStatus - status: SNAPSHOT_ACTIVE, SNAPSHOT_DELETED - * @param creationTime - Snapshot creation time. - * @param deletionTime - Snapshot deletion time. - * @param pathPreviousSnapshotId - Snapshot path previous snapshot id. - * @param globalPreviousSnapshotId - Snapshot global previous snapshot id. - * @param snapshotPath - Snapshot path, bucket .snapshot path. - * @param checkpointDir - Snapshot checkpoint directory. - * @param dbTxSequenceNumber - RDB latest transaction sequence number. 
- * @param deepCleaned - To be deep cleaned status for snapshot. - * @param referencedSize - Snapshot referenced size. - * @param referencedReplicatedSize - Snapshot referenced size w/ replication. - * @param exclusiveSize - Snapshot exclusive size. - * @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private SnapshotInfo(UUID snapshotId, - String name, - String volumeName, - String bucketName, - SnapshotStatus snapshotStatus, - long creationTime, - long deletionTime, - UUID pathPreviousSnapshotId, - UUID globalPreviousSnapshotId, - String snapshotPath, - String checkpointDir, - long dbTxSequenceNumber, - boolean deepCleaned, - boolean sstFiltered, - long referencedSize, - long referencedReplicatedSize, - long exclusiveSize, - long exclusiveReplicatedSize, - boolean deepCleanedDeletedDir) { - this.snapshotId = snapshotId; - this.name = name; - this.volumeName = volumeName; - this.bucketName = bucketName; - this.snapshotStatus = snapshotStatus; - this.creationTime = creationTime; - this.deletionTime = deletionTime; - this.pathPreviousSnapshotId = pathPreviousSnapshotId; - this.globalPreviousSnapshotId = globalPreviousSnapshotId; - this.snapshotPath = snapshotPath; - this.checkpointDir = checkpointDir; - this.dbTxSequenceNumber = dbTxSequenceNumber; - this.deepClean = deepCleaned; - this.sstFiltered = sstFiltered; - this.referencedSize = referencedSize; - this.referencedReplicatedSize = referencedReplicatedSize; - this.exclusiveSize = exclusiveSize; - this.exclusiveReplicatedSize = exclusiveReplicatedSize; - this.deepCleanedDeletedDir = deepCleanedDeletedDir; + private SnapshotInfo(Builder b) { + this.snapshotId = b.snapshotId; + this.name = b.name; + this.volumeName = b.volumeName; + this.bucketName = b.bucketName; + this.snapshotStatus = b.snapshotStatus; + this.creationTime = b.creationTime; + this.deletionTime = b.deletionTime; + this.pathPreviousSnapshotId = b.pathPreviousSnapshotId; + this.globalPreviousSnapshotId = b.globalPreviousSnapshotId; + this.snapshotPath = b.snapshotPath; + this.checkpointDir = b.checkpointDir; + this.dbTxSequenceNumber = b.dbTxSequenceNumber; + this.deepClean = b.deepClean; + this.sstFiltered = b.sstFiltered; + this.referencedSize = b.referencedSize; + this.referencedReplicatedSize = b.referencedReplicatedSize; + this.exclusiveSize = b.exclusiveSize; + this.exclusiveReplicatedSize = b.exclusiveReplicatedSize; + this.deepCleanedDeletedDir = b.deepCleanedDeletedDir; } public void setName(String name) { @@ -338,66 +299,79 @@ public Builder() { this.snapshotStatus = SnapshotStatus.DEFAULT; } + /** @param snapshotId - Snapshot UUID. */ public Builder setSnapshotId(UUID snapshotId) { this.snapshotId = snapshotId; return this; } + /** @param name - snapshot name. */ public Builder setName(String name) { this.name = name; return this; } + /** @param volumeName - volume name. */ public Builder setVolumeName(String volumeName) { this.volumeName = volumeName; return this; } + /** @param bucketName - bucket name. */ public Builder setBucketName(String bucketName) { this.bucketName = bucketName; return this; } + /** @param snapshotStatus - status: SNAPSHOT_ACTIVE, SNAPSHOT_DELETED */ public Builder setSnapshotStatus(SnapshotStatus snapshotStatus) { this.snapshotStatus = snapshotStatus; return this; } + /** @param crTime - Snapshot creation time. */ public Builder setCreationTime(long crTime) { this.creationTime = crTime; return this; } + /** @param delTime - Snapshot deletion time. 
*/ public Builder setDeletionTime(long delTime) { this.deletionTime = delTime; return this; } + /** @param pathPreviousSnapshotId - Snapshot path previous snapshot id. */ public Builder setPathPreviousSnapshotId(UUID pathPreviousSnapshotId) { this.pathPreviousSnapshotId = pathPreviousSnapshotId; return this; } + /** @param globalPreviousSnapshotId - Snapshot global previous snapshot id. */ public Builder setGlobalPreviousSnapshotId(UUID globalPreviousSnapshotId) { this.globalPreviousSnapshotId = globalPreviousSnapshotId; return this; } + /** @param snapshotPath - Snapshot path, bucket .snapshot path. */ public Builder setSnapshotPath(String snapshotPath) { this.snapshotPath = snapshotPath; return this; } + /** @param checkpointDir - Snapshot checkpoint directory. */ public Builder setCheckpointDir(String checkpointDir) { this.checkpointDir = checkpointDir; return this; } + /** @param dbTxSequenceNumber - RDB latest transaction sequence number. */ public Builder setDbTxSequenceNumber(long dbTxSequenceNumber) { this.dbTxSequenceNumber = dbTxSequenceNumber; return this; } + /** @param deepClean - To be deep cleaned status for snapshot. */ public Builder setDeepClean(boolean deepClean) { this.deepClean = deepClean; return this; @@ -408,21 +382,25 @@ public Builder setSstFiltered(boolean sstFiltered) { return this; } + /** @param referencedSize - Snapshot referenced size. */ public Builder setReferencedSize(long referencedSize) { this.referencedSize = referencedSize; return this; } + /** @param referencedReplicatedSize - Snapshot referenced size w/ replication. */ public Builder setReferencedReplicatedSize(long referencedReplicatedSize) { this.referencedReplicatedSize = referencedReplicatedSize; return this; } + /** @param exclusiveSize - Snapshot exclusive size. */ public Builder setExclusiveSize(long exclusiveSize) { this.exclusiveSize = exclusiveSize; return this; } + /** @param exclusiveReplicatedSize - Snapshot exclusive size w/ replication. */ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { this.exclusiveReplicatedSize = exclusiveReplicatedSize; return this; @@ -435,27 +413,7 @@ public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { public SnapshotInfo build() { Preconditions.checkNotNull(name); - return new SnapshotInfo( - snapshotId, - name, - volumeName, - bucketName, - snapshotStatus, - creationTime, - deletionTime, - pathPreviousSnapshotId, - globalPreviousSnapshotId, - snapshotPath, - checkpointDir, - dbTxSequenceNumber, - deepClean, - sstFiltered, - referencedSize, - referencedReplicatedSize, - exclusiveSize, - exclusiveReplicatedSize, - deepCleanedDeletedDir - ); + return new SnapshotInfo(this); } } From 1b48186a0107711235abcd2636977ae0242f6be8 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 26 Feb 2024 09:24:07 +0100 Subject: [PATCH 060/108] HDDS-10415. 
Remove duplicate HA MiniOzoneCluster factory method (#6261) --- .../hadoop/fs/ozone/TestOzoneFsHAURLs.java | 2 +- .../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 2 +- .../apache/hadoop/ozone/MiniOzoneCluster.java | 4 - .../ozone/TestMiniOzoneOMHACluster.java | 92 ------------------- .../ozone/om/TestOMBucketLayoutUpgrade.java | 2 +- .../hadoop/ozone/om/TestOMRatisSnapshots.java | 2 +- .../ozone/om/TestOMUpgradeFinalization.java | 2 +- .../ozone/om/TestOmSnapshotDisabled.java | 2 +- .../om/TestOmSnapshotDisabledRestart.java | 2 +- .../hadoop/ozone/om/TestOzoneManagerHA.java | 2 +- .../ozone/om/TestOzoneManagerHASnapshot.java | 2 +- .../om/TestSnapshotBackgroundServices.java | 2 +- .../snapshot/TestOzoneManagerSnapshotAcl.java | 2 +- .../TestOzoneManagerSnapshotProvider.java | 2 +- .../om/snapshot/TestOzoneSnapshotRestore.java | 2 +- .../recon/TestReconWithOzoneManagerHA.java | 2 +- .../shell/TestDeletedBlocksTxnShell.java | 2 +- .../hadoop/ozone/shell/TestOzoneShellHA.java | 2 +- .../ozone/shell/TestOzoneTenantShell.java | 2 +- .../hadoop/ozone/shell/TestReconfigShell.java | 2 +- .../hadoop/ozone/shell/TestScmAdminHA.java | 2 +- 21 files changed, 19 insertions(+), 115 deletions(-) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 98b87d9d3031..9099201a85e8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -107,7 +107,7 @@ static void initClass(@TempDir File tempDir) throws Exception { conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); // Start the cluster - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setNumDatanodes(5) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index ae6a24a910cf..7afdf7144f05 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -86,7 +86,7 @@ static void initClass() throws Exception { conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); // Start the cluster - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(1) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 33e15bf98607..dbbde838d3ba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -62,10 +62,6 @@ static Builder newBuilder(OzoneConfiguration conf) { * * @return MiniOzoneCluster builder */ - static Builder newOMHABuilder(OzoneConfiguration conf) { - return new MiniOzoneHAClusterImpl.Builder(conf); - } - static 
Builder newHABuilder(OzoneConfiguration conf) { return new MiniOzoneHAClusterImpl.Builder(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java deleted file mode 100644 index 0c51ba41311c..000000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.ozone.test.GenericTestUtils; - -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -/** - * This class tests MiniOzoneHAClusterImpl. - */ -@Timeout(value = 300, unit = TimeUnit.SECONDS) -public class TestMiniOzoneOMHACluster { - - private MiniOzoneHAClusterImpl cluster = null; - private OzoneConfiguration conf; - private String omServiceId; - private int numOfOMs = 3; - - /** - * Create a MiniOzoneHAClusterImpl for testing. - * - * @throws Exception - */ - @BeforeEach - public void init() throws Exception { - conf = new OzoneConfiguration(); - omServiceId = "omServiceId1"; - conf.setBoolean(OZONE_ACL_ENABLED, true); - conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, - OZONE_ADMINISTRATORS_WILDCARD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - cluster.waitForClusterToBeReady(); - } - - /** - * Shutdown MiniOzoneHAClusterImpl. 
- */ - @AfterEach - public void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testGetOMLeader() throws InterruptedException, TimeoutException { - AtomicReference ozoneManager = new AtomicReference<>(); - // Wait for OM leader election to finish - GenericTestUtils.waitFor(() -> { - OzoneManager om = cluster.getOMLeader(); - ozoneManager.set(om); - return om != null; - }, 100, 120000); - assertNotNull(ozoneManager, "Timed out waiting OM leader election to finish: " - + "no leader or more than one leader."); - assertTrue(ozoneManager.get().isLeaderReady(), "Should have gotten the leader!"); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index 58d19d846d4a..4b1fa817346d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -94,7 +94,7 @@ void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, fromLayoutVersion); String omServiceId = UUID.randomUUID().toString(); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) .setNumDatanodes(1) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index 9e816174b7d5..a42b9ce4693b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -155,7 +155,7 @@ public void init(TestInfo testInfo) throws Exception { omRatisConf.setLogAppenderWaitTimeMin(10); conf.setFromObject(omRatisConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index 22358cbe6bb7..716348e65a6f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -105,7 +105,7 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, INITIAL_VERSION.layoutVersion()); - return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) .setNumDatanodes(1) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java index fd1a60128de1..37ec1a32471d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java @@ -57,7 +57,7 @@ public static void init() throws Exception { // Disable filesystem snapshot feature for this test conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, false); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java index babc643ffa01..4b84f5c925ed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java @@ -54,7 +54,7 @@ public static void init() throws Exception { // Enable filesystem snapshot feature at the beginning conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test2") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 454019b4a8a4..091cd6fb1e06 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -177,7 +177,7 @@ public static void init() throws Exception { conf.set(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, "10s"); conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2"); - clusterBuilder = MiniOzoneCluster.newOMHABuilder(conf) + clusterBuilder = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java index 1d3ddb08a684..11f655ae5f86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java @@ -70,7 +70,7 @@ public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java index 
83386693d7dd..c3a9c075d11c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java @@ -151,7 +151,7 @@ public void init(TestInfo testInfo) throws Exception { OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, SNAPSHOT_THRESHOLD); int numOfOMs = 3; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index e9b7e59b4fd6..734cf912cb16 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -110,7 +110,7 @@ public static void init() throws Exception { final String omServiceId = "om-service-test-1" + RandomStringUtils.randomNumeric(32); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index 643191b36d41..c91cc29a8dcb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -66,7 +66,7 @@ public void init() throws Exception { omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index e0d01c148d6b..226e707f33f5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -105,7 +105,7 @@ public void init() throws Exception { String serviceID = OM_SERVICE_ID + RandomStringUtils.randomNumeric(5); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(serviceID) .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 8baad9cb97b4..49b629f745ab 100644 
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -73,7 +73,7 @@ public void setup() throws Exception { dbConf.setSyncOption(true); conf.setFromObject(dbConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(OM_SERVICE_ID) .setNumDatanodes(1) .setNumOfOzoneManagers(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 9d0552a169fe..8bb6313cf73f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -96,7 +96,7 @@ public void init() throws Exception { conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfActiveSCMs(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 34e63a4a0110..00450985f10c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -184,7 +184,7 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .setNumDatanodes(numDNs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 6abfbed2bd38..548a8832be83 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -153,7 +153,7 @@ public static void init() throws Exception { // Init cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .withoutDatanodes() // Remove this once we are actually writing data diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java index 3a36f8eaba3c..97a43c248a14 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestReconfigShell.java @@ -67,7 +67,7 @@ public class TestReconfigShell { public static void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(1) .setNumOfStorageContainerManagers(1) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 2e1b7a78736f..0324d030afab 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -45,7 +45,7 @@ public static void init() throws Exception { // Init HA cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newOMHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); From 9c2fb3aebded4695d2b3de929d7fb67b42f1072d Mon Sep 17 00:00:00 2001 From: Raju Balpande <146973984+raju-balpande@users.noreply.github.com> Date: Mon, 26 Feb 2024 18:21:02 +0530 Subject: [PATCH 061/108] HDDS-10346. Make test cases in TestSstFilteringService independent (#6247) --- .../ozone/om/TestSstFilteringService.java | 49 ++++++++++++------- 1 file changed, 30 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java index 2654f4339ab4..25fdaa908230 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java @@ -38,11 +38,8 @@ import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Order; -import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.MethodOrderer.OrderAnnotation; import org.junit.jupiter.api.io.TempDir; import org.rocksdb.LiveFileMetaData; @@ -75,7 +72,6 @@ * Test SST Filtering Service. */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -@TestMethodOrder(OrderAnnotation.class) public class TestSstFilteringService { private static final String SST_FILE_EXTENSION = ".sst"; private OzoneManagerProtocol writeClient; @@ -133,7 +129,6 @@ public void cleanup() throws Exception { * @throws IOException - on Failure. 
*/ @Test - @Order(1) public void testIrrelevantSstFileDeletion() throws Exception { RDBStore activeDbStore = (RDBStore) om.getMetadataManager().getStore(); @@ -141,11 +136,24 @@ public void testIrrelevantSstFileDeletion() keyManager.getSnapshotSstFilteringService(); final int keyCount = 100; - String volumeName = "vol1"; + String volumeName = "volz"; String bucketName1 = "buck1"; createVolume(volumeName); addBucketToVolume(volumeName, bucketName1); + long countExistingSnapshots = filteringService.getSnapshotFilteredCount().get(); + List previousFiles = activeDbStore.getDb().getSstFileList(); + List listPreviousFiles = new ArrayList(); + int level0FilesCountDiff = 0; + int totalFileCountDiff = 0; + for (LiveFileMetaData fileMetaData : previousFiles) { + totalFileCountDiff++; + listPreviousFiles.add(fileMetaData.fileName()); + if (fileMetaData.level() == 0) { + level0FilesCountDiff++; + } + } + createKeys(volumeName, bucketName1, keyCount / 2); activeDbStore.getDb().flush(OmMetadataManagerImpl.KEY_TABLE); @@ -155,8 +163,7 @@ public void testIrrelevantSstFileDeletion() int level0FilesCount = 0; int totalFileCount = 0; - List initialsstFileList = - activeDbStore.getDb().getSstFileList(); + List initialsstFileList = activeDbStore.getDb().getSstFileList(); for (LiveFileMetaData fileMetaData : initialsstFileList) { totalFileCount++; if (fileMetaData.level() == 0) { @@ -164,18 +171,18 @@ public void testIrrelevantSstFileDeletion() } } - assertEquals(totalFileCount, level0FilesCount); + assertEquals(totalFileCount - totalFileCountDiff, level0FilesCount - level0FilesCountDiff); activeDbStore.getDb().compactRange(OmMetadataManagerImpl.KEY_TABLE); int nonLevel0FilesCountAfterCompact = 0; - List nonLevelOFiles = new ArrayList<>(); + List nonLevelOFiles = new ArrayList<>(); for (LiveFileMetaData fileMetaData : activeDbStore.getDb() .getSstFileList()) { if (fileMetaData.level() != 0) { nonLevel0FilesCountAfterCompact++; - nonLevelOFiles.add(fileMetaData); + nonLevelOFiles.add(fileMetaData.fileName()); } } @@ -192,8 +199,8 @@ public void testIrrelevantSstFileDeletion() SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketName2, snapshotName1)); assertFalse(snapshotInfo.isSstFiltered()); - waitForSnapshotsAtLeast(filteringService, 1); - assertEquals(1, filteringService.getSnapshotFilteredCount().get()); + waitForSnapshotsAtLeast(filteringService, countExistingSnapshots + 1); + assertEquals(countExistingSnapshots + 1, filteringService.getSnapshotFilteredCount().get()); Set keysFromActiveDb = getKeysFromDb(om.getMetadataManager(), volumeName, bucketName2); @@ -208,17 +215,23 @@ public void testIrrelevantSstFileDeletion() OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); for (LiveFileMetaData file : allFiles) { + //Skipping the previous files from this check even those also works. + if (listPreviousFiles.contains(file.fileName())) { + continue; + } File sstFile = new File(snapshotDirName + OM_KEY_PREFIX + file.fileName()); - if (nonLevelOFiles.stream() - .anyMatch(o -> file.fileName().equals(o.fileName()))) { + if (nonLevelOFiles.contains(file.fileName())) { assertFalse(sstFile.exists()); } else { assertTrue(sstFile.exists()); } } - assertTrue(snapshotInfo.isSstFiltered()); + // Need to read the sstFiltered flag which is set in background process and + // hence snapshotInfo.isSstFiltered() may not work sometimes. 
+ assertTrue(om.getMetadataManager().getSnapshotInfoTable().get(SnapshotInfo + .getTableKey(volumeName, bucketName2, snapshotName1)).isSstFiltered()); String snapshotName2 = "snapshot2"; final long count; @@ -228,7 +241,7 @@ public void testIrrelevantSstFileDeletion() createSnapshot(volumeName, bucketName2, snapshotName2); assertThrows(TimeoutException.class, - () -> waitForSnapshotsAtLeast(filteringService, count + 1)); + () -> waitForSnapshotsAtLeast(filteringService, count + 1 + countExistingSnapshots)); assertEquals(count, filteringService.getSnapshotFilteredCount().get()); } @@ -242,7 +255,6 @@ public void testIrrelevantSstFileDeletion() } @Test - @Order(2) public void testActiveAndDeletedSnapshotCleanup() throws Exception { RDBStore activeDbStore = (RDBStore) om.getMetadataManager().getStore(); String volumeName = "volume1"; @@ -374,7 +386,6 @@ private void createKey(OzoneManagerProtocol managerProtocol, * snapshot bucket. */ @Test - @Order(3) public void testSstFilteringService() throws Exception { RDBStore activeDbStore = (RDBStore) om.getMetadataManager().getStore(); String volumeName = "volume"; From 2d77fb401621d295ac9f0af5a7d9c2b20bc24968 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 27 Feb 2024 07:43:11 +0100 Subject: [PATCH 062/108] HDDS-10423. Datanode fails to start with invalid checksum size setting (#6276) --- .../hdds/scm/TestOzoneClientConfig.java | 38 +++++++++++++++++++ .../apache/hadoop/hdds/conf/ConfigType.java | 6 +-- 2 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java new file mode 100644 index 000000000000..88f27eae6dff --- /dev/null +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +class TestOzoneClientConfig { + + @Test + void missingSizeSuffix() { + final int bytes = 1024; + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt("ozone.client.bytes.per.checksum", bytes); + + OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); + + assertEquals(bytes, subject.getBytesPerChecksum()); + } +} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java index 4ed59669a9df..e121e4333a0d 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigType.java @@ -118,7 +118,7 @@ void set(ConfigurationTarget target, String key, Object value, SIZE { @Override Object parse(String value, Config config, Class type, String key) { - StorageSize measure = StorageSize.parse(value); + StorageSize measure = StorageSize.parse(value, config.sizeUnit()); long val = Math.round(measure.getUnit().toBytes(measure.getValue())); if (type == int.class) { return (int) val; @@ -130,9 +130,9 @@ Object parse(String value, Config config, Class type, String key) { void set(ConfigurationTarget target, String key, Object value, Config config) { if (value instanceof Long) { - target.setStorageSize(key, (long) value, StorageUnit.BYTES); + target.setStorageSize(key, (long) value, config.sizeUnit()); } else if (value instanceof Integer) { - target.setStorageSize(key, (int) value, StorageUnit.BYTES); + target.setStorageSize(key, (int) value, config.sizeUnit()); } else { throw new ConfigurationException("Unsupported type " + value.getClass() + " for " + key); From f6d455fd1945f5584c9b9d175fe9b8eac6784958 Mon Sep 17 00:00:00 2001 From: Ashish Kumar <117710273+ashishkumar50@users.noreply.github.com> Date: Tue, 27 Feb 2024 12:38:51 +0530 Subject: [PATCH 063/108] HDDS-10360. Make cleanupTest compatible with enableFileSystemPath values in TestKeyManagerImpl class. 
(#6218) --- .../hadoop/ozone/om/TestKeyManagerImpl.java | 50 +++++++++++-------- 1 file changed, 30 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index ba98a28280a9..67b09a1434a7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -37,6 +37,8 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -108,7 +110,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; @@ -116,12 +120,14 @@ import jakarta.annotation.Nonnull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -162,7 +168,7 @@ public class TestKeyManagerImpl { private static final String KEY_NAME = "key1"; private static final String BUCKET_NAME = "bucket1"; private static final String BUCKET2_NAME = "bucket2"; - private static final String VERSIONED_BUCKET_NAME = "versionedBucket1"; + private static final String VERSIONED_BUCKET_NAME = "versionedbucket1"; private static final String VOLUME_NAME = "vol1"; private static OzoneManagerProtocol writeClient; private static OzoneManager om; @@ -174,6 +180,9 @@ public static void setUp() throws Exception { dir = GenericTestUtils.getRandomizedTestDir(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); + final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, + conf.get(OZONE_OM_ADDRESS_KEY)); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); nodeManager = new MockNodeManager(true, 10); NodeSchema[] schemas = new NodeSchema[] @@ -222,9 +231,6 @@ public static void setUp() throws Exception { new SCMException("SafeModePrecheck failed for allocateBlock", ResultCodes.SAFE_MODE_EXCEPTION)); createVolume(VOLUME_NAME); - createBucket(VOLUME_NAME, BUCKET_NAME, false); - 
createBucket(VOLUME_NAME, BUCKET2_NAME, false); - createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); } @AfterAll @@ -235,21 +241,21 @@ public static void cleanup() throws Exception { FileUtils.deleteDirectory(dir); } + @BeforeEach + public void init() throws Exception { + createBucket(VOLUME_NAME, BUCKET_NAME, false); + createBucket(VOLUME_NAME, BUCKET2_NAME, false); + createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); + } + @AfterEach public void cleanupTest() throws IOException { mockContainerClient(); - List fileStatuses = keyManager - .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); - for (OzoneFileStatus fileStatus : fileStatuses) { - if (fileStatus.isFile()) { - writeClient.deleteKey( - createKeyArgs(fileStatus.getKeyInfo().getKeyName())); - } else { - writeClient.deleteKey(createKeyArgs(OzoneFSUtils - .addTrailingSlashIfNeeded( - fileStatus.getKeyInfo().getKeyName()))); - } - } + org.apache.hadoop.fs.Path volumePath = new org.apache.hadoop.fs.Path(OZONE_URI_DELIMITER, VOLUME_NAME); + FileSystem fs = FileSystem.get(conf); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, BUCKET2_NAME), true); + fs.delete(new org.apache.hadoop.fs.Path(volumePath, VERSIONED_BUCKET_NAME), true); } private static void mockContainerClient() { @@ -1009,8 +1015,10 @@ public void testListStatusWithTableCache() throws Exception { } } - @Test - public void testListStatusWithTableCacheRecursive() throws Exception { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListStatusWithTableCacheRecursive(boolean enablePath) throws Exception { + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, enablePath); String keyNameDir1 = "dir1"; OmKeyArgs keyArgsDir1 = createBuilder().setKeyName(keyNameDir1).build(); @@ -1194,8 +1202,10 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { assertTrue(existKeySet.isEmpty()); } - @Test - public void testListStatus() throws IOException { + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testListStatus(boolean enablePath) throws IOException { + conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, enablePath); String superDir = RandomStringUtils.randomAlphabetic(5); int numDirectories = 5; From 083a45ee0b774c62a481d029d3de3812ccc63e2c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 27 Feb 2024 08:29:22 +0100 Subject: [PATCH 064/108] Revert "HDDS-10384. RPC client Reusing thread resources. (#6270)" (#6277) This reverts commit 84c6e4d861d907d1ac39e252aa97e8a512ef247b. 
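Reverting restores the earlier per-stream threading model shown in the diff below: BlockOutputStream and ECKeyOutputStream each create their own single-thread executor for async responses and stripe flushes and shut it down on cleanup/close, instead of drawing threads from an executor supplied by the RPC client, and the writer-pool minimum-size constants regain their TODO notes until a shared writer pool is actually wired in. The following is a minimal, hypothetical sketch of that pattern; the class and method names are illustrative only and do not correspond to the real Ozone classes.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

/**
 * Sketch of the per-stream executor lifecycle restored by this revert:
 * the stream owns a single-thread executor, submits its async work to it,
 * and is responsible for shutting it down when the stream is closed.
 */
public class PerStreamExecutorSketch implements AutoCloseable {

  // One executor per stream; all async response handling for this stream runs here.
  private final ExecutorService responseExecutor = Executors.newSingleThreadExecutor();

  private Future<?> lastWork;

  // Submit an async "response handler" for a written chunk to the stream-owned executor.
  public void writeAsync(byte[] chunk) {
    lastWork = responseExecutor.submit(() ->
        System.out.println("handled response for chunk of " + chunk.length + " bytes"));
  }

  @Override
  public void close() throws Exception {
    try {
      if (lastWork != null) {
        lastWork.get(); // wait for outstanding work before tearing down
      }
    } finally {
      // The stream created the executor, so the stream also shuts it down.
      responseExecutor.shutdown();
    }
  }

  public static void main(String[] args) throws Exception {
    try (PerStreamExecutorSketch stream = new PerStreamExecutorSketch()) {
      stream.writeAsync(new byte[1024]);
    }
  }
}

The cost of this model is one extra thread per open stream; the TODO comments kept by the revert suggest the shared writer pool will be sized appropriately once it is actually used again.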
--- .../hdds/scm/storage/AbstractCommitWatcher.java | 2 +- .../hadoop/hdds/scm/storage/BlockOutputStream.java | 5 ++++- .../reconstruction/ECReconstructionCoordinator.java | 3 ++- .../hadoop/ozone/client/io/ECKeyOutputStream.java | 11 +++++++---- .../apache/hadoop/ozone/client/rpc/RpcClient.java | 3 ++- .../ozone/client/rpc/TestOzoneAtRestEncryption.java | 12 +++--------- 6 files changed, 19 insertions(+), 17 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 957f761ccbc2..0c5501c7922c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -73,7 +73,7 @@ SortedMap> getCommitIndexMap() { return commitIndexMap; } - synchronized void updateCommitInfoMap(long index, List buffers) { + void updateCommitInfoMap(long index, List buffers) { commitIndexMap.computeIfAbsent(index, k -> new LinkedList<>()) .addAll(buffers); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 5c0516d7bd4f..5ff5da60989e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -25,6 +25,7 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; @@ -181,7 +182,8 @@ public BlockOutputStream( (long) flushPeriod * streamBufferArgs.getStreamBufferSize() == streamBufferArgs .getStreamBufferFlushSize()); - this.responseExecutor = blockOutputStreamResourceProvider.get(); + // A single thread executor handle the responses of async requests + responseExecutor = Executors.newSingleThreadExecutor(); bufferList = null; totalDataFlushedLength = 0; writtenDataLength = 0; @@ -655,6 +657,7 @@ public void cleanup(boolean invalidateClient) { bufferList.clear(); } bufferList = null; + responseExecutor.shutdown(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 90756bbc8898..a45c15844847 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -101,7 +101,8 @@ public class ECReconstructionCoordinator implements Closeable { private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; - private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 5; + // TODO: Adjusts to the appropriate value when the ec-reconstruct-writer thread pool is used. 
+ private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 0; private final ECContainerOperationClient containerOperationClient; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java index 0cb3973e0411..878558073f75 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java @@ -43,6 +43,8 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -64,6 +66,7 @@ public final class ECKeyOutputStream extends KeyOutputStream private final int numParityBlks; private final ByteBufferPool bufferPool; private final RawErasureEncoder encoder; + private final ExecutorService flushExecutor; private final Future flushFuture; private final AtomicLong flushCheckpoint; @@ -116,13 +119,12 @@ private ECKeyOutputStream(Builder builder) { this.writeOffset = 0; this.encoder = CodecUtil.createRawEncoderWithFallback( builder.getReplicationConfig()); + this.flushExecutor = Executors.newSingleThreadExecutor(); S3Auth s3Auth = builder.getS3CredentialsProvider().get(); ThreadLocal s3CredentialsProvider = builder.getS3CredentialsProvider(); - this.flushFuture = builder.getExecutorServiceSupplier().get().submit(() -> { - s3CredentialsProvider.set(s3Auth); - return flushStripeFromQueue(); - }); + flushExecutor.submit(() -> s3CredentialsProvider.set(s3Auth)); + this.flushFuture = this.flushExecutor.submit(this::flushStripeFromQueue); this.flushCheckpoint = new AtomicLong(0); this.atomicKeyCreation = builder.getAtomicKeyCreation(); } @@ -493,6 +495,7 @@ public void close() throws IOException { } catch (InterruptedException e) { throw new IOException("Flushing thread was interrupted", e); } finally { + flushExecutor.shutdownNow(); closeCurrentStreamEntry(); blockOutputStreamEntryPool.cleanup(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index a6830ba9f771..74b22e7ca4c6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -196,7 +196,8 @@ public class RpcClient implements ClientProtocol { // for reconstruction. private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; - private static final int WRITE_POOL_MIN_SIZE = 1; + // TODO: Adjusts to the appropriate value when the writeThreadPool is used. 
+ private static final int WRITE_POOL_MIN_SIZE = 0; private final ConfigurationSource conf; private final OzoneManagerClientProtocol ozoneManagerClient; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 44303ed2ff23..29cf1bc5e117 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -213,14 +213,6 @@ static void shutdown() throws IOException { } } - static void reInitClient() throws IOException { - ozClient = OzoneClientFactory.getRpcClient(conf); - store = ozClient.getObjectStore(); - TestOzoneRpcClient.setOzClient(ozClient); - TestOzoneRpcClient.setStore(store); - } - - @ParameterizedTest @EnumSource void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { @@ -778,7 +770,9 @@ void testGetKeyProvider() throws Exception { KeyProvider kp3 = ozClient.getObjectStore().getKeyProvider(); assertNotEquals(kp3, kpSpy); - reInitClient(); + // Restore ozClient and store + TestOzoneRpcClient.setOzClient(OzoneClientFactory.getRpcClient(conf)); + TestOzoneRpcClient.setStore(ozClient.getObjectStore()); } private static RepeatedOmKeyInfo getMatchedKeyInfo( From 7939faf7d6c904bf1e4ad32baa5d6d0c1de19003 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Tue, 27 Feb 2024 17:33:44 +0530 Subject: [PATCH 065/108] HDDS-815. Rename HDDS config keys prefixed with dfs. (#6274) --- .../hadoop/hdds/conf/OzoneConfiguration.java | 63 +++++++++++++++- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 38 +++++----- .../apache/hadoop/ozone/OzoneConfigKeys.java | 22 +++--- .../src/main/resources/ozone-default.xml | 71 ++++++++++--------- .../feature/Streaming-Write-Pipeline.md | 4 +- .../runConfigurations/Datanode2-ha.xml | 2 +- .../intellij/runConfigurations/Datanode2.xml | 2 +- .../runConfigurations/Datanode3-ha.xml | 2 +- .../intellij/runConfigurations/Datanode3.xml | 2 +- .../src/main/compose/ozone-ha/docker-config | 2 +- .../dist/src/main/compose/ozone/docker-config | 2 +- .../main/compose/ozonesecure-ha/docker-config | 2 +- .../main/compose/ozonesecure/docker-config | 2 +- .../src/test/resources/ozone-site.xml | 7 +- 14 files changed, 144 insertions(+), 77 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index 69cce8db6d6b..ed897f898c0b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import com.google.common.base.Preconditions; +import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.ratis.server.RaftServerConfigKeys; import static java.util.Collections.unmodifiableSortedSet; @@ -323,7 +324,67 @@ private static void addDeprecatedKeys() { new DeprecationDelta("ozone.scm.chunk.layout", ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY), new DeprecationDelta("hdds.datanode.replication.work.dir", - OZONE_CONTAINER_COPY_WORKDIR) + OZONE_CONTAINER_COPY_WORKDIR), + new 
DeprecationDelta("dfs.container.chunk.write.sync", + OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY), + new DeprecationDelta("dfs.container.ipc", + OzoneConfigKeys.DFS_CONTAINER_IPC_PORT), + new DeprecationDelta("dfs.container.ipc.random.port", + OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.admin.port", + OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT), + new DeprecationDelta("dfs.container.ratis.datanode.storage.dir", + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), + new DeprecationDelta("dfs.container.ratis.datastream.enabled", + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED), + new DeprecationDelta("dfs.container.ratis.datastream.port", + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT), + new DeprecationDelta("dfs.container.ratis.datastream.random.port", + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.enabled", + ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY), + new DeprecationDelta("dfs.container.ratis.ipc", + OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT), + new DeprecationDelta("dfs.container.ratis.ipc.random.port", + OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT), + new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit", + ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit", + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements", + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.log.purge.gap", + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP), + new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit", + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), + new DeprecationDelta("dfs.container.ratis.log.queue.num-elements", + ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), + new DeprecationDelta("dfs.container.ratis.num.container.op.executors", + ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), + new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume", + ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), + new DeprecationDelta("dfs.container.ratis.replication.level", + ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), + new DeprecationDelta("dfs.container.ratis.rpc.type", + ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.preallocated.size", + ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.segment.size", + ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), + new DeprecationDelta("dfs.container.ratis.server.port", + OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries", + ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), + new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout", + ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), + new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions", + ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), + new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration", + ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), + new 
DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration", + ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), + new DeprecationDelta("dfs.ratis.snapshot.threshold", + ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY) }); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c6760451c693..e093a45af03d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -42,93 +42,93 @@ public final class ScmConfigKeys { "ozone.scm.db.dirs.permissions"; public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = "dfs.container.ratis.enabled"; + = "hdds.container.ratis.enabled"; public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT = false; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = "dfs.container.ratis.rpc.type"; + = "hdds.container.ratis.rpc.type"; public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME - = "dfs.container.ratis.num.write.chunk.threads.per.volume"; + = "hdds.container.ratis.num.write.chunk.threads.per.volume"; public static final int DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = "dfs.container.ratis.replication.level"; + = "hdds.container.ratis.replication.level"; public static final ReplicationLevel DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = "dfs.container.ratis.num.container.op.executors"; + = "hdds.container.ratis.num.container.op.executors"; public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10; public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = - "dfs.container.ratis.segment.size"; + "hdds.container.ratis.segment.size"; public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB"; public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = - "dfs.container.ratis.segment.preallocated.size"; + "hdds.container.ratis.segment.preallocated.size"; public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; public static final String DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - "dfs.container.ratis.statemachinedata.sync.timeout"; + "hdds.container.ratis.statemachinedata.sync.timeout"; public static final TimeDuration DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = TimeDuration.valueOf(10, TimeUnit.SECONDS); public static final String DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - "dfs.container.ratis.statemachinedata.sync.retries"; + "hdds.container.ratis.statemachinedata.sync.retries"; public static final String DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = - "dfs.container.ratis.statemachine.max.pending.apply-transactions"; + "hdds.container.ratis.statemachine.max.pending.apply-transactions"; // The default value of maximum number of pending state machine apply // transactions is kept same as default snapshot threshold. 
public static final int DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = 100000; public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.queue.num-elements"; + "hdds.container.ratis.log.queue.num-elements"; public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.queue.byte-limit"; + "hdds.container.ratis.log.queue.byte-limit"; public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = "4GB"; public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - "dfs.container.ratis.log.appender.queue.num-elements"; + "hdds.container.ratis.log.appender.queue.num-elements"; public static final int DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - "dfs.container.ratis.log.appender.queue.byte-limit"; + "hdds.container.ratis.log.appender.queue.byte-limit"; public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - "dfs.container.ratis.log.purge.gap"; + "hdds.container.ratis.log.purge.gap"; // TODO: Set to 1024 once RATIS issue around purge is fixed. public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - "dfs.container.ratis.leader.pending.bytes.limit"; + "hdds.container.ratis.leader.pending.bytes.limit"; public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - "dfs.ratis.server.retry-cache.timeout.duration"; + "hdds.ratis.server.retry-cache.timeout.duration"; public static final TimeDuration DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); public static final String DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "dfs.ratis.leader.election.minimum.timeout.duration"; + "hdds.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(5, TimeUnit.SECONDS); public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - "dfs.ratis.snapshot.threshold"; + "hdds.ratis.snapshot.threshold"; public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; // TODO : this is copied from OzoneConsts, may need to move to a better place diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 7bfda0184096..300711000934 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -37,7 +37,7 @@ @InterfaceStability.Unstable public final class OzoneConfigKeys { public static final String DFS_CONTAINER_IPC_PORT = - "dfs.container.ipc"; + "hdds.container.ipc.port"; public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; @@ -60,47 +60,47 @@ public final class OzoneConfigKeys { * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}. 
*/ public static final String DFS_CONTAINER_IPC_RANDOM_PORT = - "dfs.container.ipc.random.port"; + "hdds.container.ipc.random.port"; public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = - "dfs.container.ratis.datastream.random.port"; + "hdds.container.ratis.datastream.random.port"; public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = false; public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = - "dfs.container.chunk.write.sync"; + "hdds.container.chunk.write.sync"; public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; /** * Ratis Port where containers listen to. */ public static final String DFS_CONTAINER_RATIS_IPC_PORT = - "dfs.container.ratis.ipc"; + "hdds.container.ratis.ipc.port"; public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. */ public static final String DFS_CONTAINER_RATIS_ADMIN_PORT = - "dfs.container.ratis.admin.port"; + "hdds.container.ratis.admin.port"; public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; /** * Ratis Port where containers listen to server-to-server requests. */ public static final String DFS_CONTAINER_RATIS_SERVER_PORT = - "dfs.container.ratis.server.port"; + "hdds.container.ratis.server.port"; public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; /** * Ratis Port where containers listen to datastream requests. */ public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED - = "dfs.container.ratis.datastream.enabled"; + = "hdds.container.ratis.datastream.enabled"; public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT = false; public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT - = "dfs.container.ratis.datastream.port"; + = "hdds.container.ratis.datastream.port"; public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT = 9855; @@ -134,7 +134,7 @@ public final class OzoneConfigKeys { * a mini cluster is able to launch multiple containers on a node. */ public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = - "dfs.container.ratis.ipc.random.port"; + "hdds.container.ratis.ipc.random.port"; public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; @@ -368,7 +368,7 @@ public final class OzoneConfigKeys { ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = - "dfs.container.ratis.datanode.storage.dir"; + "hdds.container.ratis.datanode.storage.dir"; public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index e5e3726beb5d..ee0aa4514a78 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -46,26 +46,26 @@ - dfs.container.ipc + hdds.container.ipc.port 9859 OZONE, CONTAINER, MANAGEMENT The ipc port number of container. - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. 
- dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. - dfs.container.ratis.datastream.random.port + hdds.container.ratis.datastream.random.port false OZONE, CONTAINER, RATIS, DATASTREAM Allocates a random free port for ozone container datastream. @@ -73,7 +73,7 @@ - dfs.container.ipc.random.port + hdds.container.ipc.random.port false OZONE, DEBUG, CONTAINER Allocates a random free port for ozone container. This is used @@ -82,7 +82,7 @@ - dfs.container.chunk.write.sync + hdds.container.chunk.write.sync false OZONE, CONTAINER, MANAGEMENT Determines whether the chunk writes in the container happen as @@ -90,19 +90,19 @@ - dfs.container.ratis.statemachinedata.sync.timeout + hdds.container.ratis.statemachinedata.sync.timeout 10s OZONE, DEBUG, CONTAINER, RATIS Timeout for StateMachine data writes by Ratis. - dfs.container.ratis.statemachinedata.sync.retries + hdds.container.ratis.statemachinedata.sync.retries OZONE, DEBUG, CONTAINER, RATIS Number of times the WriteStateMachineData op will be tried before failing. If the value is not configured, it will default - to (hdds.ratis.rpc.slowness.timeout / dfs.container.ratis.statemachinedata.sync.timeout), + to (hdds.ratis.rpc.slowness.timeout / hdds.container.ratis.statemachinedata.sync.timeout), which means that the WriteStatMachineData will be retried for every sync timeout until the configured slowness timeout is hit, after which the StateMachine will close down the pipeline. @@ -112,21 +112,22 @@ - dfs.container.ratis.log.queue.num-elements + hdds.container.ratis.log.queue.num-elements 1024 OZONE, DEBUG, CONTAINER, RATIS Limit for the number of operations in Ratis Log Worker. - dfs.container.ratis.log.queue.byte-limit + hdds.container.ratis.log.queue.byte-limit 4GB OZONE, DEBUG, CONTAINER, RATIS Byte limit for Ratis Log Worker queue. - dfs.container.ratis.log.appender.queue.num-elements + hdds.container.ratis.log.appender.queue.num-elements + 1 OZONE, DEBUG, CONTAINER, RATIS Limit for number of append entries in ratis leader's @@ -134,14 +135,16 @@ - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 32MB OZONE, DEBUG, CONTAINER, RATIS Byte limit for ratis leader's log appender queue. - dfs.container.ratis.log.purge.gap + hdds.container.ratis.log.purge.gap + 1000000 OZONE, DEBUG, CONTAINER, RATIS Purge gap between the last purged commit index @@ -149,7 +152,7 @@ - dfs.container.ratis.datanode.storage.dir + hdds.container.ratis.datanode.storage.dir OZONE, CONTAINER, STORAGE, MANAGEMENT, RATIS This directory is used for storing Ratis metadata like logs. If @@ -223,7 +226,7 @@ - dfs.container.ratis.enabled + hdds.container.ratis.enabled false OZONE, MANAGEMENT, PIPELINE, RATIS Ozone supports different kinds of replication pipelines. Ratis @@ -232,25 +235,26 @@ - dfs.container.ratis.ipc + hdds.container.ratis.ipc.port 9858 OZONE, CONTAINER, PIPELINE, RATIS The ipc port number of container for clients. - dfs.container.ratis.admin.port + hdds.container.ratis.admin.port 9857 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for admin requests. - dfs.container.ratis.server.port + hdds.container.ratis.server.port 9856 OZONE, CONTAINER, PIPELINE, RATIS, MANAGEMENT The ipc port number of container for server-server communication. 
- dfs.container.ratis.ipc.random.port + hdds.container.ratis.ipc.random.port + false OZONE,DEBUG Allocates a random free port for ozone ratis port for the @@ -259,7 +263,7 @@ - dfs.container.ratis.rpc.type + hdds.container.ratis.rpc.type GRPC OZONE, RATIS, MANAGEMENT Ratis supports different kinds of transports like netty, GRPC, @@ -268,7 +272,7 @@ - dfs.ratis.snapshot.threshold + hdds.ratis.snapshot.threshold 10000 OZONE, RATIS Number of transactions after which a ratis snapshot should be @@ -276,16 +280,16 @@ - dfs.container.ratis.statemachine.max.pending.apply-transactions + hdds.container.ratis.statemachine.max.pending.apply-transactions 10000 OZONE, RATIS Maximum number of pending apply transactions in a data pipeline. The default value is kept same as default snapshot threshold - dfs.ratis.snapshot.threshold. + hdds.ratis.snapshot.threshold. - dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 10 OZONE, RATIS, PERFORMANCE Maximum number of threads in the thread pool that Datanode @@ -295,7 +299,8 @@ - dfs.container.ratis.leader.pending.bytes.limit + hdds.container.ratis.leader.pending.bytes.limit + 1GB OZONE, RATIS, PERFORMANCE Limit on the total bytes of pending requests after which @@ -303,7 +308,7 @@ - dfs.container.ratis.replication.level + hdds.container.ratis.replication.level MAJORITY OZONE, RATIS Replication level to be used by datanode for submitting a @@ -312,7 +317,7 @@ - dfs.container.ratis.num.container.op.executors + hdds.container.ratis.num.container.op.executors 10 OZONE, RATIS, PERFORMANCE Number of executors that will be used by Ratis to execute @@ -320,7 +325,7 @@ - dfs.container.ratis.segment.size + hdds.container.ratis.segment.size 64MB OZONE, RATIS, PERFORMANCE The size of the raft segment file used @@ -328,7 +333,7 @@ - dfs.container.ratis.segment.preallocated.size + hdds.container.ratis.segment.preallocated.size 4MB OZONE, RATIS, PERFORMANCE The pre-allocated file size for raft segment used @@ -336,13 +341,13 @@ - dfs.ratis.server.retry-cache.timeout.duration + hdds.ratis.server.retry-cache.timeout.duration 600000ms OZONE, RATIS, MANAGEMENT Retry Cache entry timeout for ratis server. - dfs.ratis.leader.election.minimum.timeout.duration + hdds.ratis.leader.election.minimum.timeout.duration 5s OZONE, RATIS, MANAGEMENT The minimum timeout duration for ratis leader election. @@ -707,7 +712,7 @@ For production clusters or any time you care about performance, it is recommended that ozone.om.db.dirs, ozone.scm.db.dirs and - dfs.container.ratis.datanode.storage.dir be configured separately. + hdds.container.ratis.datanode.storage.dir be configured separately. diff --git a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md index 5f55afebc3c8..e48a95c8bb9c 100644 --- a/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md +++ b/hadoop-hdds/docs/content/feature/Streaming-Write-Pipeline.md @@ -43,7 +43,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - To enable the Streaming Write Pipeline feature, set the following property to true. ```XML - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled false OZONE, CONTAINER, RATIS, DATASTREAM It specifies whether to enable data stream of container. @@ -52,7 +52,7 @@ Set the following properties to the Ozone configuration file `ozone-site.xml`. - Datanodes listen to the following port for the streaming traffic. 
```XML - dfs.container.ratis.datastream.port + hdds.container.ratis.datastream.port 9855 OZONE, CONTAINER, RATIS, DATASTREAM The datastream port number of container. diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml index 171494aa5dbe..df9c4c0ab3e6 100644 --- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml +++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2-ha.xml @@ -18,7 +18,7 @@ - dfs.container.ratis.num.write.chunk.threads.per.volume + hdds.container.ratis.num.write.chunk.threads.per.volume 4 @@ -52,7 +52,7 @@ - dfs.container.ratis.datastream.enabled + hdds.container.ratis.datastream.enabled true @@ -82,7 +82,8 @@ - dfs.container.ratis.log.appender.queue.byte-limit + hdds.container.ratis.log.appender.queue.byte-limit + 8MB From 0e413c9833bbc7e35ea1d1e4b5d833a87aa5ee16 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Wed, 28 Feb 2024 00:10:04 +0800 Subject: [PATCH 066/108] HDDS-10428. OzoneClientConfig#validate does not get called (#6282) --- .../java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java | 2 +- .../java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 65e466529773..3042b4d847a0 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -231,7 +231,7 @@ public enum ChecksumCombineMode { private String fsDefaultBucketLayout = "FILE_SYSTEM_OPTIMIZED"; @PostConstruct - private void validate() { + public void validate() { Preconditions.checkState(streamBufferSize > 0); Preconditions.checkState(streamBufferFlushSize > 0); Preconditions.checkState(streamBufferMaxSize > 0); diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java index 88f27eae6dff..0dd29cb50a45 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/TestOzoneClientConfig.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.junit.jupiter.api.Test; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; class TestOzoneClientConfig { @@ -33,6 +34,6 @@ void missingSizeSuffix() { OzoneClientConfig subject = conf.getObject(OzoneClientConfig.class); - assertEquals(bytes, subject.getBytesPerChecksum()); + assertEquals(OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE, subject.getBytesPerChecksum()); } } From 54548aa76836dc9acfdb5fb62f0b0e8bb169b1b4 Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Tue, 27 Feb 2024 23:09:13 +0530 Subject: [PATCH 067/108] HDDS-10327. 
S3G does not work in a single-node deployment (#6257) --- .../java/org/apache/hadoop/ozone/client/rpc/RpcClient.java | 4 ++-- .../org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 3 ++- .../java/org/apache/hadoop/ozone/s3/util/S3StorageType.java | 4 ++++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 74b22e7ca4c6..bcb08f1d9130 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1432,7 +1432,7 @@ public OzoneDataStreamOutput createStreamKey( if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(keyName); } - HddsClientUtils.checkNotNull(keyName, replicationConfig); + HddsClientUtils.checkNotNull(keyName); OmKeyArgs.Builder builder = new OmKeyArgs.Builder() .setVolumeName(volumeName) @@ -1819,7 +1819,7 @@ public OmMultipartInfo initiateMultipartUpload(String volumeName, HddsClientUtils.checkNotNull(keyName); if (omVersion .compareTo(OzoneManagerVersion.ERASURE_CODED_STORAGE_SUPPORT) < 0) { - if (replicationConfig.getReplicationType() + if (replicationConfig != null && replicationConfig.getReplicationType() == HddsProtos.ReplicationType.EC) { throw new IOException("Can not set the replication of the file to" + " Erasure Coded replication, as OzoneManager does not support" diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 0514125abd10..1d3850b12ac9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -772,7 +772,8 @@ public Response initializeMultipartUpload( private ReplicationConfig getReplicationConfig(OzoneBucket ozoneBucket, String storageType) throws OS3Exception { if (StringUtils.isEmpty(storageType)) { - storageType = S3StorageType.getDefault(ozoneConfiguration).toString(); + S3StorageType defaultStorageType = S3StorageType.getDefault(ozoneConfiguration); + storageType = (defaultStorageType != null ? defaultStorageType.toString() : null); } ReplicationConfig clientConfiguredReplicationConfig = null; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java index ae42e812fb3e..9eb88989a32e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/util/S3StorageType.java @@ -62,6 +62,10 @@ public ReplicationType getType() { public static S3StorageType getDefault(ConfigurationSource config) { String replicationString = config.get(OzoneConfigKeys.OZONE_REPLICATION); ReplicationFactor configFactor; + if (replicationString == null) { + // if no config is set then let server take decision + return null; + } try { configFactor = ReplicationFactor.valueOf( Integer.parseInt(replicationString)); From 9fb61ffa3b79a7cdcb2d33da81c46b1fa55d5445 Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Wed, 28 Feb 2024 02:35:59 +0530 Subject: [PATCH 068/108] HDDS-10413. 
Recon - UnsupportedOperationException while merging Incremental Container Reports. (#6260) Co-authored-by: deveshsingh --- .../SCMDatanodeHeartbeatDispatcher.java | 14 +++++++--- .../hadoop/ozone/recon/TestReconUtils.java | 25 +++++++++++++++++ ...econIncrementalContainerReportHandler.java | 27 +++++++++++++++++++ 3 files changed, 62 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java index b6dc6f599bd6..484a1e6f0f4b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java @@ -213,7 +213,7 @@ public static class ReportFromDatanode { private final DatanodeDetails datanodeDetails; - private final T report; + private T report; public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) { this.datanodeDetails = datanodeDetails; @@ -227,6 +227,10 @@ public DatanodeDetails getDatanodeDetails() { public T getReport() { return report; } + + public void setReport(T report) { + this.report = report; + } } /** @@ -381,9 +385,11 @@ public String getEventId() { @Override public void mergeReport(ContainerReport nextReport) { if (nextReport.getType() == ContainerReportType.ICR) { - getReport().getReportList().addAll( - ((ReportFromDatanode) nextReport) - .getReport().getReportList()); + // To update existing report list , need to create a builder and then + // merge new reports to existing report list. + IncrementalContainerReportProto reportProto = getReport().toBuilder().addAllReport( + ((ReportFromDatanode) nextReport).getReport().getReportList()).build(); + setReport(reportProto); } } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index f49826e67d81..d5962c0c407d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -45,7 +45,11 @@ import org.apache.commons.lang3.RandomUtils; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -53,6 +57,7 @@ * Test Recon Utility methods. 
*/ public class TestReconUtils { + private static PipelineID randomPipelineID = PipelineID.randomId(); @TempDir private Path temporaryFolder; @@ -234,4 +239,24 @@ private static int oldNextClosestPowerIndexOfTwo(long dataSize) { } return index; } + + private static ContainerInfo.Builder getDefaultContainerInfoBuilder( + final HddsProtos.LifeCycleState state) { + return new ContainerInfo.Builder() + .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig( + RatisReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.THREE)) + .setState(state) + .setSequenceId(10000L) + .setOwner("TEST"); + } + + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(randomPipelineID) + .build(); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index efde79f9bacb..3c572aa8e052 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -55,6 +56,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; +import org.apache.hadoop.ozone.recon.TestReconUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -165,6 +167,31 @@ public void testProcessICRStateMismatch() } } + @Test + public void testMergeMultipleICRs() { + final ContainerInfo container = TestReconUtils.getContainer(LifeCycleState.OPEN); + final DatanodeDetails datanodeOne = randomDatanodeDetails(); + final IncrementalContainerReportProto containerReport = + getIncrementalContainerReportProto(container.containerID(), + ContainerReplicaProto.State.CLOSED, + datanodeOne.getUuidString()); + final IncrementalContainerReportFromDatanode icrFromDatanode1 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + final IncrementalContainerReportFromDatanode icrFromDatanode2 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + assertEquals(1, icrFromDatanode1.getReport().getReportList().size()); + icrFromDatanode1.mergeReport(icrFromDatanode2); + assertEquals(2, icrFromDatanode1.getReport().getReportList().size()); + + final IncrementalContainerReportFromDatanode icrFromDatanode3 = + new IncrementalContainerReportFromDatanode( + datanodeOne, containerReport); + icrFromDatanode1.mergeReport(icrFromDatanode3); + assertEquals(3, icrFromDatanode1.getReport().getReportList().size()); + } + private LifeCycleState getContainerStateFromReplicaState( State state) { switch (state) { From 
1e98ebb4491c5f3d77dd56497d4f96f87558f274 Mon Sep 17 00:00:00 2001 From: Duong Nguyen Date: Tue, 27 Feb 2024 14:47:40 -0800 Subject: [PATCH 069/108] HDDS-10432. Hadoop FS client write(byte[], int, int) is very slow in streaming (#6287) --- .../hadoop/ozone/client/io/ByteBufferOutputStream.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java index 174fd8c75f6d..19ce31c52932 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/ozone/client/io/ByteBufferOutputStream.java @@ -39,6 +39,11 @@ public void write(@Nonnull byte[] byteArray) throws IOException { write(ByteBuffer.wrap(byteArray)); } + @Override + public void write(@Nonnull byte[] byteArray, int off, int len) throws IOException { + write(ByteBuffer.wrap(byteArray), off, len); + } + @Override public void write(int b) throws IOException { write(new byte[]{(byte) b}); From e0bf7b4f7b11e2b90a4a45f0737f67f1579cb248 Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Wed, 28 Feb 2024 10:25:12 +0530 Subject: [PATCH 070/108] HDDS-10370. Recon - Handle the pre-existing missing empty containers in clusters. (#6255) --- .../hadoop/ozone/recon/TestReconTasks.java | 25 ++++++- .../ozone/recon/api/ContainerEndpoint.java | 3 + .../ozone/recon/fsck/ContainerHealthTask.java | 9 ++- .../ContainerHealthSchemaManager.java | 11 +++ .../recon/fsck/TestContainerHealthTask.java | 71 +++++++++++++++---- 5 files changed, 99 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index 44385698c5c3..cba7311b3b4f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -69,11 +69,11 @@ public void init() throws Exception { conf.set(HDDS_PIPELINE_REPORT_INTERVAL, "5s"); ReconTaskConfig taskConfig = conf.getObject(ReconTaskConfig.class); - taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(15)); + taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(10)); conf.setFromObject(taskConfig); conf.set("ozone.scm.stale.node.interval", "6s"); - conf.set("ozone.scm.dead.node.interval", "10s"); + conf.set("ozone.scm.dead.node.interval", "8s"); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .includeRecon(true).build(); cluster.waitForClusterToBeReady(); @@ -246,6 +246,7 @@ public void testEmptyMissingContainerDownNode() throws Exception { return (allEmptyMissingContainers.size() == 1); }); + // Now add a container to key mapping count as 3. This data is used to // identify if container is empty in terms of keys mapped to container. try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { @@ -274,6 +275,26 @@ public void testEmptyMissingContainerDownNode() throws Exception { return (allEmptyMissingContainers.isEmpty()); }); + // Now remove keys from container. This data is used to + // identify if container is empty in terms of keys mapped to container. 
+ try (RDBBatchOperation rdbBatchOperation = new RDBBatchOperation()) { + reconContainerMetadataManager + .batchStoreContainerKeyCounts(rdbBatchOperation, containerID, 0L); + reconContainerMetadataManager.commitBatchOperation(rdbBatchOperation); + } + + // Check existing container state in UNHEALTHY_CONTAINER table + // will be updated as EMPTY_MISSING + LambdaTestUtils.await(25000, 1000, () -> { + List allEmptyMissingContainers = + reconContainerManager.getContainerSchemaManager() + .getUnhealthyContainers( + ContainerSchemaDefinition.UnHealthyContainerStates. + EMPTY_MISSING, + 0, 1000); + return (allEmptyMissingContainers.size() == 1); + }); + // Now restart the cluster and verify the container is no longer missing. cluster.restartHddsDatanode(pipeline.getFirstNode(), true); LambdaTestUtils.await(25000, 1000, () -> { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index d838e9c36e57..06f175c0dc4e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -397,6 +397,9 @@ public Response getUnhealthyContainers( summary = containerHealthSchemaManager.getUnhealthyContainersSummary(); List containers = containerHealthSchemaManager .getUnhealthyContainers(internalState, offset, limit); + containers.stream() + .filter( + container -> !container.getContainerState().equals(UnHealthyContainerStates.EMPTY_MISSING.toString())); for (UnhealthyContainers c : containers) { long containerID = c.getContainerId(); ContainerInfo containerInfo = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index a5d259d3e939..2284fe84e6d4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT; import static org.apache.hadoop.ozone.recon.ReconConstants.TOTAL_KEYS; import static org.apache.hadoop.ozone.recon.ReconConstants.TOTAL_USED_BYTES; +import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.EMPTY_MISSING; /** @@ -295,6 +296,8 @@ private long processExistingDBRecords(long currentTime, rec.update(); } } else { + LOG.info("DELETED existing unhealthy container record...for Container: {}", + currentContainer.getContainerID()); rec.delete(); } } catch (ContainerNotFoundException cnf) { @@ -430,7 +433,7 @@ public static boolean retainOrUpdateRecord( boolean returnValue = false; switch (UnHealthyContainerStates.valueOf(rec.getContainerState())) { case MISSING: - returnValue = container.isMissing(); + returnValue = container.isMissing() && !container.isEmpty(); break; case MIS_REPLICATED: returnValue = keepMisReplicatedRecord(container, rec); @@ -495,10 +498,10 @@ public static List generateUnhealthyRecords( "starting with **Container State Stats:**"); } records.add( - recordForState(container, UnHealthyContainerStates.EMPTY_MISSING, + recordForState(container, EMPTY_MISSING, time)); populateContainerStats(container, - UnHealthyContainerStates.EMPTY_MISSING, + EMPTY_MISSING, unhealthyContainerStateStatsMap); 
} // A container cannot have any other records if it is missing so return diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java index 364aff103a51..0c13376fa526 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/ContainerHealthSchemaManager.java @@ -34,6 +34,9 @@ import org.jooq.DSLContext; import org.jooq.Record; import org.jooq.SelectQuery; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.List; /** @@ -41,6 +44,8 @@ */ @Singleton public class ContainerHealthSchemaManager { + private static final Logger LOG = + LoggerFactory.getLogger(ContainerHealthSchemaManager.class); private final UnhealthyContainersDao unhealthyContainersDao; private final ContainerSchemaDefinition containerSchemaDefinition; @@ -113,6 +118,12 @@ public Cursor getAllUnhealthyRecordsCursor() { } public void insertUnhealthyContainerRecords(List recs) { + if (LOG.isDebugEnabled()) { + recs.forEach(rec -> { + LOG.debug("rec.getContainerId() : {}, rec.getContainerState(): {} ", rec.getContainerId(), + rec.getContainerState()); + }); + } unhealthyContainersDao.insert(recs); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 001d44d9c203..8647639dd134 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.fsck; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.assertj.core.api.Assertions.assertThat; import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -38,6 +39,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicatedReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.TestContainerInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; @@ -110,38 +113,61 @@ public void testRun() throws Exception { when(scmClientMock.getContainerWithPipeline(c.getContainerID())) .thenReturn(new ContainerWithPipeline(c, null)); } + + ReplicatedReplicationConfig replicationConfig = RatisReplicationConfig.getInstance(THREE); // Under replicated - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) + ContainerInfo containerInfo1 = + 
TestContainerInfo.newBuilderForTest().setContainerID(1).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(1L))).thenReturn(containerInfo1); + when(containerManagerMock.getContainerReplicas(containerInfo1.containerID())) .thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY)); // return all UNHEALTHY replicas for container ID 2 -> UNDER_REPLICATED - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) + ContainerInfo containerInfo2 = + TestContainerInfo.newBuilderForTest().setContainerID(2).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(2L))).thenReturn(containerInfo2); + when(containerManagerMock.getContainerReplicas(containerInfo2.containerID())) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); - // return 0 replicas for container ID 3 -> Missing - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) + // return 0 replicas for container ID 3 -> Empty Missing + ContainerInfo containerInfo3 = + TestContainerInfo.newBuilderForTest().setContainerID(3).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(3L))).thenReturn(containerInfo3); + when(containerManagerMock.getContainerReplicas(containerInfo3.containerID())) .thenReturn(Collections.emptySet()); // Return 5 Healthy -> Over replicated - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L))) + ContainerInfo containerInfo4 = + TestContainerInfo.newBuilderForTest().setContainerID(4).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(4L))).thenReturn(containerInfo4); + when(containerManagerMock.getContainerReplicas(containerInfo4.containerID())) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); // Mis-replicated + ContainerInfo containerInfo5 = + TestContainerInfo.newBuilderForTest().setContainerID(5).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(5L))).thenReturn(containerInfo5); Set misReplicas = getMockReplicas(5L, State.CLOSED, State.CLOSED, State.CLOSED); placementMock.setMisRepWhenDnPresent( misReplicas.iterator().next().getDatanodeDetails().getUuid()); - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L))) + when(containerManagerMock.getContainerReplicas(containerInfo5.containerID())) .thenReturn(misReplicas); // Return 3 Healthy -> Healthy container - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L))) + ContainerInfo containerInfo6 = + TestContainerInfo.newBuilderForTest().setContainerID(6).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(6L))).thenReturn(containerInfo6); + when(containerManagerMock.getContainerReplicas(containerInfo6.containerID())) .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 7 -> EMPTY_MISSING - when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(7L))) + // return 0 replicas for container ID 7 -> MISSING + ContainerInfo containerInfo7 = + TestContainerInfo.newBuilderForTest().setContainerID(7).setReplicationConfig(replicationConfig).build(); + when(containerManagerMock.getContainer(ContainerID.valueOf(7L))).thenReturn(containerInfo7); + when(containerManagerMock.getContainerReplicas(containerInfo7.containerID())) 
.thenReturn(Collections.emptySet()); List all = unHealthyContainersTableHandle.findAll(); @@ -150,7 +176,7 @@ public void testRun() throws Exception { long currentTime = System.currentTimeMillis(); ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); - reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); + reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(5)); when(reconContainerMetadataManager.getKeyCountForContainer( 7L)).thenReturn(5L); ContainerHealthTask containerHealthTask = @@ -215,7 +241,7 @@ public void testRun() throws Exception { .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); - // return 0 replicas for container ID 3 -> Still Missing + // return 0 replicas for container ID 3 -> Still empty Missing when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); @@ -227,7 +253,7 @@ public void testRun() throws Exception { // Was mis-replicated - make it healthy now placementMock.setMisRepWhenDnPresent(null); - LambdaTestUtils.await(6000, 1000, () -> + LambdaTestUtils.await(60000, 1000, () -> (unHealthyContainersTableHandle.count() == 4)); rec = unHealthyContainersTableHandle.fetchByContainerId(1L).get(0); assertEquals("UNDER_REPLICATED", rec.getContainerState()); @@ -252,6 +278,21 @@ public void testRun() throws Exception { // This container is now healthy, it should not be in the table any more assertEquals(0, unHealthyContainersTableHandle.fetchByContainerId(5L).size()); + + // Again make container Id 7 as empty which was missing as well, so in next + // container health task run, this container also should be deleted from + // UNHEALTHY_CONTAINERS table because we want to cleanup any existing + // EMPTY and MISSING containers from UNHEALTHY_CONTAINERS table. + when(reconContainerMetadataManager.getKeyCountForContainer(7L)).thenReturn(0L); + LambdaTestUtils.await(6000, 1000, () -> { + UnhealthyContainers emptyMissingContainer = unHealthyContainersTableHandle.fetchByContainerId(7L).get(0); + return ("EMPTY_MISSING".equals(emptyMissingContainer.getContainerState())); + }); + + // Just check once again that count doesn't change, only state of + // container 7 changes from MISSING to EMPTY_MISSING + LambdaTestUtils.await(60000, 1000, () -> + (unHealthyContainersTableHandle.count() == 4)); } @Test @@ -423,9 +464,9 @@ private List getMockContainers(int num) { when(c.getContainerID()).thenReturn((long)i); when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig.getInstance( - HddsProtos.ReplicationFactor.THREE)); + THREE)); when(c.getReplicationFactor()) - .thenReturn(HddsProtos.ReplicationFactor.THREE); + .thenReturn(THREE); when(c.getState()).thenReturn(HddsProtos.LifeCycleState.CLOSED); when(c.containerID()).thenReturn(ContainerID.valueOf(i)); containers.add(c); @@ -438,7 +479,7 @@ private ContainerInfo getMockDeletedContainer(int containerID) { when(c.getContainerID()).thenReturn((long)containerID); when(c.getReplicationConfig()) .thenReturn(RatisReplicationConfig - .getInstance(HddsProtos.ReplicationFactor.THREE)); + .getInstance(THREE)); when(c.containerID()).thenReturn(ContainerID.valueOf(containerID)); when(c.getState()).thenReturn(HddsProtos.LifeCycleState.DELETED); return c; From 8c4ab8e4cd41f6f03d8032aff72cdaaf0fe1c254 Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Wed, 28 Feb 2024 11:01:17 +0530 Subject: [PATCH 071/108] HDDS-8683. 
Container balancer thread interrupt may not work (#6179) --- .../hdds/scm/container/balancer/ContainerBalancer.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index 7b5cbe9f21fc..0e45b131363d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -342,10 +342,13 @@ private static void blockTillTaskStop(Thread balancingThread) { // NOTE: join should be called outside the lock in hierarchy // to avoid locking others waiting // wait for balancingThread to die with interrupt - balancingThread.interrupt(); LOG.info("Container Balancer waiting for {} to stop", balancingThread); try { - balancingThread.join(); + while (balancingThread.isAlive()) { + // retry interrupt every 5ms to avoid waiting when thread is sleeping + balancingThread.interrupt(); + balancingThread.join(5); + } } catch (InterruptedException exception) { Thread.currentThread().interrupt(); } From f4406546ac58e74bab9a9af1c9049e92f16e0a93 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 28 Feb 2024 09:05:36 +0100 Subject: [PATCH 072/108] HDDS-10416. Move HA-specific settings to MiniOzoneHAClusterImpl.Builder (#6275) --- .../hadoop/ozone/MiniOzoneChaosCluster.java | 2 +- .../hadoop/fs/ozone/TestOzoneFsHAURLs.java | 2 +- .../hdds/scm/TestSecretKeySnapshot.java | 4 +- .../hadoop/hdds/scm/TestSecretKeysApi.java | 1 - .../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 2 +- .../hdds/upgrade/TestScmHAFinalization.java | 2 +- .../managed/TestRocksObjectLeakDetector.java | 3 -- .../apache/hadoop/ozone/MiniOzoneCluster.java | 39 +---------------- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 42 +++++++++++++++++++ .../apache/hadoop/ozone/TestBlockTokens.java | 1 - .../hadoop/ozone/TestBlockTokensCLI.java | 1 - .../hadoop/ozone/TestMultipartObjectGet.java | 1 - .../ozone/om/snapshot/TestOmSnapshot.java | 1 - .../recon/TestReconWithOzoneManagerHA.java | 2 +- .../shell/TestOzoneContainerUpgradeShell.java | 6 --- .../ozone/shell/TestOzoneDebugShell.java | 1 - 16 files changed, 50 insertions(+), 60 deletions(-) diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 143ec59ddece..6469a631768c 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -287,7 +287,7 @@ public MiniOzoneChaosCluster build() throws IOException { DatanodeStoreCache.setMiniClusterMode(); initializeConfiguration(); - if (numOfOMs > 1) { + if (numberOfOzoneManagers() > 1) { initOMRatisConf(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 9099201a85e8..6f3a9bb5a173 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -108,9 +108,9 @@ static void initClass(@TempDir File tempDir) throws Exception { // Start the cluster cluster = MiniOzoneCluster.newHABuilder(conf) - .setNumDatanodes(5) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) + .setNumDatanodes(5) .build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java index f7a3aa9c9b7b..ebd68eb13edb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java @@ -121,10 +121,10 @@ public void init() throws Exception { MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKeySnapshot") .setSCMServiceId("SCMServiceId") - .setNumDatanodes(1) .setNumOfStorageContainerManagers(3) .setNumOfActiveSCMs(2) - .setNumOfOzoneManagers(1); + .setNumOfOzoneManagers(1) + .setNumDatanodes(1); cluster = (MiniOzoneHAClusterImpl) builder.build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index eb2442cd0988..405534af95eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -328,7 +328,6 @@ private void startCluster(int numSCMs) OzoneManager.setTestSecureOmFlag(true); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") - .setNumDatanodes(3) .setNumOfStorageContainerManagers(numSCMs) .setNumOfOzoneManagers(1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 97855f3775fb..33c09e2ce835 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -168,9 +168,9 @@ public static void initClass() { MiniOzoneCluster.Builder builder = new MiniOzoneHAClusterImpl.Builder(conf) - .setNumDatanodes(NUM_DATA_NODES) .setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) + .setNumDatanodes(NUM_DATA_NODES) .setDatanodeFactory(UniformDatanodesFactory.newBuilder() .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .build()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index a8af377e9846..da63a7de6b2c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java 
@@ -96,8 +96,8 @@ public void init(OzoneConfiguration conf, .setNumOfStorageContainerManagers(NUM_SCMS) .setNumOfActiveSCMs(NUM_SCMS - numInactiveSCMs) .setSCMServiceId("scmservice") - .setSCMConfigurator(configurator) .setNumOfOzoneManagers(1) + .setSCMConfigurator(configurator) .setNumDatanodes(NUM_DATANODES) .setDatanodeFactory(UniformDatanodesFactory.newBuilder() .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java index 4197ac8a8165..3239dfc1a47b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestRocksObjectLeakDetector.java @@ -50,10 +50,7 @@ static void setUp() throws IOException, InterruptedException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL"); - String omServiceId = "omServiceId1"; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(1) .build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index dbbde838d3ba..b10b021b69e8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -62,7 +62,7 @@ static Builder newBuilder(OzoneConfiguration conf) { * * @return MiniOzoneCluster builder */ - static Builder newHABuilder(OzoneConfiguration conf) { + static MiniOzoneHAClusterImpl.Builder newHABuilder(OzoneConfiguration conf) { return new MiniOzoneHAClusterImpl.Builder(conf); } @@ -271,13 +271,6 @@ abstract class Builder { protected String path; protected String clusterId; - protected String omServiceId; - protected int numOfOMs; - protected int numOfActiveOMs = ACTIVE_OMS_NOT_SET; - - protected String scmServiceId; - protected int numOfSCMs; - protected int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; protected SCMConfigurator scmConfigurator; protected String scmId = UUID.randomUUID().toString(); @@ -364,41 +357,11 @@ public Builder setDatanodeFactory(DatanodeFactory factory) { return this; } - public Builder setNumOfOzoneManagers(int numOMs) { - this.numOfOMs = numOMs; - return this; - } - - public Builder setNumOfActiveOMs(int numActiveOMs) { - this.numOfActiveOMs = numActiveOMs; - return this; - } - - public Builder setOMServiceId(String serviceId) { - this.omServiceId = serviceId; - return this; - } - public Builder includeRecon(boolean include) { this.includeRecon = include; return this; } - public Builder setNumOfStorageContainerManagers(int numSCMs) { - this.numOfSCMs = numSCMs; - return this; - } - - public Builder setNumOfActiveSCMs(int numActiveSCMs) { - this.numOfActiveSCMs = numActiveSCMs; - return this; - } - - public Builder setSCMServiceId(String serviceId) { - this.scmServiceId = serviceId; - return this; - } - /** * Constructs and returns MiniOzoneCluster. 
* diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 928ae907c65b..c33307c6906b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -344,6 +344,14 @@ public static class Builder extends MiniOzoneClusterImpl.Builder { private final List activeSCMs = new ArrayList<>(); private final List inactiveSCMs = new ArrayList<>(); + private String omServiceId; + private int numOfOMs; + private int numOfActiveOMs = ACTIVE_OMS_NOT_SET; + + private String scmServiceId; + private int numOfSCMs; + private int numOfActiveSCMs = ACTIVE_SCMS_NOT_SET; + /** * Creates a new Builder. * @@ -353,6 +361,36 @@ public Builder(OzoneConfiguration conf) { super(conf); } + public Builder setNumOfOzoneManagers(int numOMs) { + this.numOfOMs = numOMs; + return this; + } + + public Builder setNumOfActiveOMs(int numActiveOMs) { + this.numOfActiveOMs = numActiveOMs; + return this; + } + + public Builder setOMServiceId(String serviceId) { + this.omServiceId = serviceId; + return this; + } + + public Builder setNumOfStorageContainerManagers(int numSCMs) { + this.numOfSCMs = numSCMs; + return this; + } + + public Builder setNumOfActiveSCMs(int numActiveSCMs) { + this.numOfActiveSCMs = numActiveSCMs; + return this; + } + + public Builder setSCMServiceId(String serviceId) { + this.scmServiceId = serviceId; + return this; + } + @Override public MiniOzoneCluster build() throws IOException { if (numOfActiveOMs > numOfOMs) { @@ -402,6 +440,10 @@ public MiniOzoneCluster build() throws IOException { return cluster; } + protected int numberOfOzoneManagers() { + return numOfOMs; + } + protected void initOMRatisConf() { conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index a04c1236186c..9d05b54be8af 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -380,7 +380,6 @@ private static void startCluster() OzoneManager.setTestSecureOmFlag(true); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index a181a6f45e95..a42fbf2e8642 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -321,7 +321,6 @@ private static void startCluster() MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index cb49f3b320a0..0d0acfbd8c94 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -106,7 +106,6 @@ private static void startCluster() MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) - .setNumDatanodes(3) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); cluster = (MiniOzoneHAClusterImpl) builder.build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index dafd43be04a4..8021b959849e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -211,7 +211,6 @@ private void init() throws Exception { conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); cluster = MiniOzoneCluster.newBuilder(conf) - .setNumOfOzoneManagers(3) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 49b629f745ab..4ba546c47dad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -75,8 +75,8 @@ public void setup() throws Exception { cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(OM_SERVICE_ID) - .setNumDatanodes(1) .setNumOfOzoneManagers(3) + .setNumDatanodes(1) .includeRecon(true) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java index a79e2de245da..3a9f7e322b9e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneContainerUpgradeShell.java @@ -80,7 +80,6 @@ public class TestOzoneContainerUpgradeShell { private static final Logger LOG = LoggerFactory.getLogger(TestOzoneContainerUpgradeShell.class); - private static String omServiceId; private static MiniOzoneCluster cluster = null; private static OzoneClient client; private static OzoneConfiguration conf = null; @@ -88,12 +87,7 @@ public class TestOzoneContainerUpgradeShell { private static final String BUCKET_NAME = UUID.randomUUID().toString(); protected static void startCluster() throws Exception { - // Init HA cluster - omServiceId = "om-service-test-upgrade-container1"; - final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) - .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index b50cea759ea4..15d9746fcb6a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -88,7 +88,6 @@ protected static void startCluster() throws Exception { omServiceId = "om-service-test1"; final int numDNs = 3; cluster = MiniOzoneCluster.newBuilder(conf) - .setOMServiceId(omServiceId) .setNumDatanodes(numDNs) .build(); cluster.waitForClusterToBeReady(); From 4da5a642243a42bba1aea6701d3d6b4b3a9462b4 Mon Sep 17 00:00:00 2001 From: Hongbing Wang <284734261@qq.com> Date: Wed, 28 Feb 2024 16:33:53 +0800 Subject: [PATCH 073/108] HDDS-10425. Increase OM transaction index for non-Ratis based on existing Ratis transactionInfoTable (#6281) --- .../ozone/om/TestOMEpochForNonRatis.java | 46 +++++++++++++++++++ .../apache/hadoop/ozone/om/OzoneManager.java | 13 +++--- 2 files changed, 53 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java index 991b3a66fb03..01ba4db399fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java @@ -24,6 +24,8 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.client.ObjectStore; @@ -42,11 +44,13 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OmUtils.EPOCH_ID_SHIFT; import static org.apache.hadoop.ozone.OmUtils.EPOCH_WHEN_RATIS_NOT_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests OM epoch generation for when Ratis is not enabled. @@ -145,6 +149,48 @@ public void testUniqueTrxnIndexOnOMRestart() throws Exception { assertEquals(4, om.getLastTrxnIndexForNonRatis()); } + @Test + public void testIncreaseTrxnIndexBasedOnExistingDB() throws Exception { + // Set transactionInfo.getTerm() not -1 to mock the DB migrated from ratis cluster. + // When OM is first started from the existing ratis DB, the transaction index for + // requests should not start from 0. It should incrementally increase from the last + // transaction index which was stored in DB transactionInfoTable before started. 
+ + String volumeName = "volume" + RandomStringUtils.randomNumeric(5); + String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); + String keyName = "key" + RandomStringUtils.randomNumeric(5); + + OzoneManager om = cluster.getOzoneManager(); + ObjectStore objectStore = client.getObjectStore(); + + objectStore.createVolume(volumeName); + OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); + ozoneVolume.createBucket(bucketName); + + Table transactionInfoTable = om.getMetadataManager().getTransactionInfoTable(); + long initIndex = transactionInfoTable.get(TRANSACTION_INFO_KEY).getTransactionIndex(); + // Set transactionInfo.getTerm() = 1 to mock the DB migrated from ratis cluster + transactionInfoTable.put(TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, initIndex)); + TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); + // Verify transaction term != -1 and index > 1 + assertEquals(1, transactionInfo.getTerm()); + assertTrue(initIndex > 1); + + // Restart the OM and create new object + cluster.restartOzoneManager(); + + String data = "random data"; + OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName).createKey(keyName, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); + ozoneOutputStream.close(); + + // Transaction index after OM restart is incremented by 2 (create and commit op) from the last + // transaction index before OM restart rather than from 0. + // So, the transactionIndex should be (initIndex + 2) rather than (0 + 2) + assertEquals(initIndex + 2, + om.getMetadataManager().getTransactionInfoTable().get(TRANSACTION_INFO_KEY).getTransactionIndex()); + } + @Test public void testEpochIntegrationInObjectID() throws Exception { // Create a volume and check the objectID has the epoch as diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index fda68b416e4f..b6bd57ff6f51 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2153,15 +2153,16 @@ public long getObjectIdFromTxId(long trxnId) { long getLastTrxnIndexForNonRatis() throws IOException { TransactionInfo transactionInfo = TransactionInfo.readTransactionInfo(metadataManager); - // If the OMTransactionInfo does not exist in DB or if the term is not -1 - // (corresponding to non-Ratis cluster), return 0 so that new incoming + // If the OMTransactionInfo does not exist in DB, return 0 so that new incoming // requests can have transaction index starting from 1. - if (transactionInfo == null || transactionInfo.getTerm() != -1) { + if (transactionInfo == null) { return 0; } - // If there exists a last transaction index in DB, the new incoming - // requests in non-Ratis cluster must have transaction index - // incrementally increasing from the stored transaction index onwards. + // If there exists a last transaction index in DB, including two cases: + // 1. transactionInfo.getTerm() == -1 corresponds to a non-Ratis cluster + // 2. transactionInfo.getTerm() != -1 indicates that the DB may be migrated from Ratis cluster + // For both cases above, the new incoming requests in non-Ratis cluster must have + // transaction index incrementally increasing from the stored transaction index onwards. 
return transactionInfo.getTransactionIndex(); } From 543c9e79dd634557a62cdc8cb24da82cbe572e17 Mon Sep 17 00:00:00 2001 From: Aswin Shakil Balasubramanian Date: Wed, 28 Feb 2024 03:16:37 -0800 Subject: [PATCH 074/108] HDDS-9235. ReplicationManager metrics not collected after restart. (#6280) --- .../server-scm/dev-support/findbugsExcludeFile.xml | 5 +++++ .../container/replication/ReplicationManager.java | 1 + .../replication/ReplicationManagerMetrics.java | 13 +++++++++---- .../replication/TestContainerReplicaPendingOps.java | 8 ++++++++ .../replication/TestECUnderReplicationHandler.java | 8 ++++++++ .../replication/TestReplicationManager.java | 8 ++++++++ 6 files changed, 39 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml index 50f349186089..dc08720c9687 100644 --- a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml @@ -51,4 +51,9 @@ + + + + + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index a3661243be69..32310ef9e7bf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -294,6 +294,7 @@ public synchronized void start() { if (!isRunning()) { LOG.info("Starting Replication Monitor Thread."); running = true; + metrics = ReplicationManagerMetrics.create(this); if (rmConf.isLegacyEnabled()) { legacyReplicationManager.setMetrics(metrics); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java index 5c3ee4e29aec..eb75db9bd504 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java @@ -235,10 +235,15 @@ public ReplicationManagerMetrics(ReplicationManager manager) { } public static ReplicationManagerMetrics create(ReplicationManager manager) { - return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, - "SCM Replication manager (closed container replication) related " - + "metrics", - new ReplicationManagerMetrics(manager)); + ReplicationManagerMetrics replicationManagerMetrics = (ReplicationManagerMetrics) + DefaultMetricsSystem.instance().getSource(METRICS_SOURCE_NAME); + if (replicationManagerMetrics == null) { + return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, + "SCM Replication manager (closed container replication) related " + + "metrics", + new ReplicationManagerMetrics(manager)); + } + return replicationManagerMetrics; } @Override diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java index a97cdbddb8af..3775531d30d1 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestContainerReplicaPendingOps.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.ozone.test.TestClock; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -74,6 +75,13 @@ public void setup() { dn3 = MockDatanodeDetails.randomDatanodeDetails(); } + @AfterEach + void cleanup() { + if (metrics != null) { + metrics.unRegister(); + } + } + @Test public void testGetPendingOpsReturnsEmptyList() { List ops = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java index 22c3630e0c6b..f69822129365 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java @@ -47,6 +47,7 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.assertj.core.util.Lists; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -174,6 +175,13 @@ public NodeStatus getNodeStatus(DatanodeDetails dd) { .thenReturn(new ContainerPlacementStatusDefault(2, 2, 3)); } + @AfterEach + void cleanup() { + if (metrics != null) { + metrics.unRegister(); + } + } + @ParameterizedTest @ValueSource(strings = {"rs-6-3-1024k", "rs-10-4-1024k"}) void defersNonCriticalPartialReconstruction(String rep) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java index 47844f32fb0d..ecb3ce4b039d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java @@ -55,6 +55,7 @@ import org.apache.hadoop.util.Lists; import org.apache.ozone.test.TestClock; import org.apache.ratis.protocol.exceptions.NotLeaderException; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -190,6 +191,13 @@ public void setup() throws IOException { when(scmContext.getScm()).thenReturn(scm); } + @AfterEach + void cleanup() { + if (replicationManager.getMetrics() != null) { + replicationManager.getMetrics().unRegister(); + } + } + private ReplicationManager createReplicationManager() throws IOException { return new ReplicationManager( configuration, From 1830fe20556f608b7945df52d959f36f91c4350b Mon Sep 17 00:00:00 2001 From: Ivan Zlenko <241953+ivanzlenko@users.noreply.github.com> Date: Wed, 28 Feb 2024 17:27:39 +0500 Subject: [PATCH 075/108] HDDS-10367. 
Fix possible NPE in listKeysLight, listStatus, listStatusLight (#6221) --- ...ManagerProtocolClientSideTranslatorPB.java | 45 +++++---- .../hadoop/ozone/om/TestListKeysWithFSO.java | 27 +++++ .../hadoop/ozone/om/TestListStatus.java | 99 +++++++++---------- 3 files changed, 96 insertions(+), 75 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index dd201a42620d..08fa029833e7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -265,7 +265,6 @@ public final class OzoneManagerProtocolClientSideTranslatorPB private OmTransport transport; private ThreadLocal threadLocalS3Auth = new ThreadLocal<>(); - private boolean s3AuthCheck; public static final int BLOCK_ALLOCATION_RETRY_COUNT = 5; @@ -1044,7 +1043,7 @@ public ListKeysLightResult listKeysLight(String volumeName, reqBuilder.setBucketName(bucketName); reqBuilder.setCount(maxKeys); - if (StringUtils.isNotEmpty(startKey)) { + if (startKey != null) { reqBuilder.setStartKey(startKey); } @@ -2288,16 +2287,9 @@ public List listStatus(OmKeyArgs args, boolean recursive, .setSortDatanodes(args.getSortDatanodes()) .setLatestVersionLocation(args.getLatestVersionLocation()) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatus) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2324,16 +2316,9 @@ public List listStatusLight(OmKeyArgs args, .setSortDatanodes(false) .setLatestVersionLocation(true) .build(); - ListStatusRequest.Builder listStatusRequestBuilder = - ListStatusRequest.newBuilder() - .setKeyArgs(keyArgs) - .setRecursive(recursive) - .setStartKey(startKey) - .setNumEntries(numEntries); - if (allowPartialPrefixes) { - listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); - } + ListStatusRequest.Builder listStatusRequestBuilder = createListStatusRequestBuilder(keyArgs, recursive, startKey, + numEntries, allowPartialPrefixes); OMRequest omRequest = createOMRequest(Type.ListStatusLight) .setListStatusRequest(listStatusRequestBuilder.build()) @@ -2350,6 +2335,26 @@ public List listStatusLight(OmKeyArgs args, return statusList; } + private ListStatusRequest.Builder createListStatusRequestBuilder(KeyArgs keyArgs, boolean recursive, String startKey, + long numEntries, boolean allowPartialPrefixes) { + ListStatusRequest.Builder listStatusRequestBuilder = + ListStatusRequest.newBuilder() + .setKeyArgs(keyArgs) + .setRecursive(recursive) + .setNumEntries(numEntries); + + if (startKey != null) { + listStatusRequestBuilder.setStartKey(startKey); + } else { + listStatusRequestBuilder.setStartKey(""); + } + + if (allowPartialPrefixes) { + listStatusRequestBuilder.setAllowPartialPrefix(allowPartialPrefixes); + } + return listStatusRequestBuilder; + } + @Override 
public List listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries) throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java index f499e3569c8b..11594f3ef11c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListKeysWithFSO.java @@ -63,6 +63,8 @@ public class TestListKeysWithFSO { private static OzoneBucket fsoOzoneBucket; private static OzoneBucket legacyOzoneBucket2; private static OzoneBucket fsoOzoneBucket2; + private static OzoneBucket emptyLegacyOzoneBucket; + private static OzoneBucket emptyFsoOzoneBucket; private static OzoneClient client; /** @@ -105,6 +107,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(fsoBucketName, omBucketArgs); fsoOzoneBucket2 = ozoneVolume.getBucket(fsoBucketName); + fsoBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(fsoBucketName, omBucketArgs); + emptyFsoOzoneBucket = ozoneVolume.getBucket(fsoBucketName); + builder = BucketArgs.newBuilder(); builder.setStorageType(StorageType.DISK); builder.setBucketLayout(BucketLayout.LEGACY); @@ -113,6 +119,10 @@ public static void init() throws Exception { ozoneVolume.createBucket(legacyBucketName, omBucketArgs); legacyOzoneBucket2 = ozoneVolume.getBucket(legacyBucketName); + legacyBucketName = "bucket" + RandomStringUtils.randomNumeric(5); + ozoneVolume.createBucket(legacyBucketName, omBucketArgs); + emptyLegacyOzoneBucket = ozoneVolume.getBucket(legacyBucketName); + initFSNameSpace(); } @@ -479,6 +489,23 @@ public void testShallowListKeys() throws Exception { expectedKeys = getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); + + // case-7: keyPrefix corresponds to multiple existing keys and + // startKey is null in empty bucket + keyPrefix = "a1/b1/c12"; + startKey = null; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, emptyLegacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, emptyFsoOzoneBucket); + + // case-8: keyPrefix corresponds to multiple existing keys and + // startKey is null + keyPrefix = "a1/b1/c12"; + // a1/b1/c1222.tx + expectedKeys = + getExpectedKeyShallowList(keyPrefix, startKey, legacyOzoneBucket); + checkKeyShallowList(keyPrefix, startKey, expectedKeys, fsoOzoneBucket); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java index 52cb9287cc02..20977f9d4834 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java @@ -16,10 +16,10 @@ */ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; 
@@ -29,24 +29,30 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; import java.util.List; +import java.util.stream.Stream; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.apache.hadoop.ozone.OzoneConfigKeys. - OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.params.provider.Arguments.arguments; /** * A simple test that asserts that list status output is sorted. */ @Timeout(1200) public class TestListStatus { + private static final Logger LOG = LoggerFactory.getLogger(TestListStatus.class); private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; private static OzoneBucket fsoOzoneBucket; private static OzoneClient client; @@ -54,11 +60,11 @@ public class TestListStatus { * Create a MiniDFSCluster for testing. *

* - * @throws IOException + * @throws IOException in case of I/O error */ @BeforeAll public static void init() throws Exception { - conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); cluster = MiniOzoneCluster.newBuilder(conf).build(); @@ -69,7 +75,7 @@ public static void init() throws Exception { fsoOzoneBucket = TestDataUtil .createVolumeAndBucket(client, BucketLayout.FILE_SYSTEM_OPTIMIZED); - // Set the number of keys to be processed during batch operate. + // Set the number of keys to be processed during batch operated. conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); buildNameSpaceTree(fsoOzoneBucket); @@ -83,44 +89,30 @@ public static void teardownClass() { } } - @Test - public void testSortedListStatus() throws Exception { - // a) test if output is sorted - checkKeyList("", "", 1000, 10, false); - - // b) number of keys returns is expected - checkKeyList("", "", 2, 2, false); - - // c) check if full prefix works - checkKeyList("a1", "", 100, 3, false); - - // d) check if full prefix with numEntries work - checkKeyList("a1", "", 2, 2, false); - - // e) check if existing start key >>> - checkKeyList("a1", "a1/a12", 100, 2, false); - - // f) check with non-existing start key - checkKeyList("", "a7", 100, 6, false); - - // g) check if half prefix works - checkKeyList("b", "", 100, 4, true); - - // h) check half prefix with non-existing start key - checkKeyList("b", "b5", 100, 2, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c", 100, 0, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "b/g5", 100, 4, true); - - // i) check half prefix with non-existing parent in start key - checkKeyList("b", "c/g5", 100, 0, true); + @MethodSource("sortedListStatusParametersSource") + @ParameterizedTest(name = "{index} {5}") + public void testSortedListStatus(String keyPrefix, String startKey, int numEntries, int expectedNumKeys, + boolean isPartialPrefix, String testName) throws Exception { + checkKeyList(keyPrefix, startKey, numEntries, expectedNumKeys, isPartialPrefix); + } - // j) check prefix with non-existing prefix key - // and non-existing parent in start key - checkKeyList("a1/a111", "a1/a111/a100", 100, 0, true); + private static Stream sortedListStatusParametersSource() { + return Stream.of( + arguments("", "", 1000, 10, false, "Test if output is sorted"), + arguments("", "", 2, 2, false, "Number of keys returns is expected"), + arguments("a1", "", 100, 3, false, "Check if the full prefix works"), + arguments("a1", "", 2, 2, false, "Check if full prefix with numEntries work"), + arguments("a1", "a1/a12", 100, 2, false, "Check if existing start key >>>"), + arguments("", "a7", 100, 6, false, "Check with a non-existing start key"), + arguments("b", "", 100, 4, true, "Check if half-prefix works"), + arguments("b", "b5", 100, 2, true, "Check half prefix with non-existing start key"), + arguments("b", "c", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "b/g5", 100, 4, true, "Check half prefix with non-existing parent in a start key"), + arguments("b", "c/g5", 100, 0, true, "Check half prefix with non-existing parent in a start key"), + arguments("a1/a111", "a1/a111/a100", 100, 0, true, "Check prefix with a non-existing prefix key\n" + + " and non-existing parent in a start key"), + arguments("a1/a111", null, 100, 0, true, "Check start key is null") + ); } private 
static void createFile(OzoneBucket bucket, String keyName) @@ -131,6 +123,7 @@ private static void createFile(OzoneBucket bucket, String keyName) oos.flush(); } } + private static void buildNameSpaceTree(OzoneBucket ozoneBucket) throws Exception { /* @@ -172,33 +165,29 @@ private static void buildNameSpaceTree(OzoneBucket ozoneBucket) createFile(ozoneBucket, "/b8"); } - private void checkKeyList(String keyPrefix, String startKey, - long numEntries, int expectedNumKeys, - boolean isPartialPrefix) - throws Exception { + private void checkKeyList(String keyPrefix, String startKey, long numEntries, int expectedNumKeys, + boolean isPartialPrefix) throws Exception { List statuses = fsoOzoneBucket.listStatus(keyPrefix, false, startKey, numEntries, isPartialPrefix); assertEquals(expectedNumKeys, statuses.size()); - System.out.println("BEGIN:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("BEGIN:::keyPrefix---> {} :::---> {}", keyPrefix, startKey); for (int i = 0; i < statuses.size() - 1; i++) { OzoneFileStatus stCurr = statuses.get(i); OzoneFileStatus stNext = statuses.get(i + 1); - System.out.println("status:" + stCurr); + LOG.info("status: {}", stCurr); assertThat(stCurr.getPath().compareTo(stNext.getPath())).isLessThan(0); } if (!statuses.isEmpty()) { OzoneFileStatus stNext = statuses.get(statuses.size() - 1); - System.out.println("status:" + stNext); + LOG.info("status: {}", stNext); } - System.out.println("END:::keyPrefix---> " + keyPrefix + ":::---> " + - startKey); + LOG.info("END:::keyPrefix---> {}:::---> {}", keyPrefix, startKey); } } From aa68aec220e8f4f83ce3f2e4cbcd8df12f684bc5 Mon Sep 17 00:00:00 2001 From: Arafat2198 <98023601+ArafatKhan2198@users.noreply.github.com> Date: Wed, 28 Feb 2024 18:48:49 +0530 Subject: [PATCH 076/108] HDDS-10324. Metadata are not updated when keys are overwritten. (#6273) --- .../ozone/om/request/key/OMKeyRequest.java | 6 + .../request/key/TestOMKeyCreateRequest.java | 151 +++++++++++++++++- 2 files changed, 149 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 100c2d842f22..7e4e30316b85 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -775,6 +775,12 @@ protected OmKeyInfo prepareFileInfo( dbKeyInfo.setModificationTime(keyArgs.getModificationTime()); dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled); dbKeyInfo.setReplicationConfig(replicationConfig); + + // Construct a new metadata map from KeyArgs. + // Clear the old one when the key is overwritten. 
+ dbKeyInfo.getMetadata().clear(); + dbKeyInfo.getMetadata().putAll(KeyValueUtil.getFromProtobuf( + keyArgs.getMetadataList())); return dbKeyInfo; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 5d79e7771520..f61e947d2b1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -25,10 +25,11 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; -import java.util.UUID; -import java.util.stream.Collectors; import java.util.Map; +import java.util.Collections; import java.util.HashMap; +import java.util.UUID; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -41,14 +42,16 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.lock.OzoneLockProvider; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -66,6 +69,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.createOmKeyInfo; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.assertj.core.api.Assertions.assertThat; @@ -464,6 +468,107 @@ public void testValidateAndUpdateCacheWithInvalidPath( assertNull(omKeyInfo); } + + @ParameterizedTest + @MethodSource("data") + public void testOverwritingExistingMetadata( + boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + + addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, + getBucketLayout()); + + Map initialMetadata = + Collections.singletonMap("initialKey", "initialValue"); + OMRequest initialRequest = + createKeyRequest(false, 0, keyName, initialMetadata); + OMKeyCreateRequest initialOmKeyCreateRequest = + new OMKeyCreateRequest(initialRequest, getBucketLayout()); + OMClientResponse initialResponse = + initialOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); + verifyMetadataInResponse(initialResponse, 
initialMetadata); + + // We have to add the key to the key table, as validateAndUpdateCache only + // updates the cache and not the DB. + OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationConfig).build(); + keyInfo.setMetadata(initialMetadata); + omMetadataManager.getKeyTable(initialOmKeyCreateRequest.getBucketLayout()) + .put(getOzoneKey(), keyInfo); + + Map updatedMetadata = + Collections.singletonMap("initialKey", "updatedValue"); + OMRequest updatedRequest = + createKeyRequest(false, 0, keyName, updatedMetadata); + OMKeyCreateRequest updatedOmKeyCreateRequest = + new OMKeyCreateRequest(updatedRequest, getBucketLayout()); + + OMClientResponse updatedResponse = + updatedOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); + verifyMetadataInResponse(updatedResponse, updatedMetadata); + } + + @ParameterizedTest + @MethodSource("data") + public void testCreationWithoutMetadataFollowedByOverwriteWithMetadata( + boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { + when(ozoneManager.getOzoneLockProvider()).thenReturn( + new OzoneLockProvider(setKeyPathLock, setFileSystemPaths)); + addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, + getBucketLayout()); + + // Create the key request without any initial metadata + OMRequest createRequestWithoutMetadata = createKeyRequest(false, 0, keyName, + null); // Passing 'null' for metadata + OMKeyCreateRequest createOmKeyCreateRequest = + new OMKeyCreateRequest(createRequestWithoutMetadata, getBucketLayout()); + + // Perform the create operation without any metadata + OMClientResponse createResponse = + createOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 100L); + // Verify that no metadata exists in the response + assertThat( + createResponse.getOMResponse().getCreateKeyResponse().getKeyInfo() + .getMetadataList()).isEmpty(); + + OmKeyInfo keyInfo = createOmKeyInfo(volumeName, bucketName, keyName, + replicationConfig).build(); + omMetadataManager.getKeyTable(createOmKeyCreateRequest.getBucketLayout()) + .put(getOzoneKey(), keyInfo); + + // Define new metadata for the overwrite operation + Map overwriteMetadata = new HashMap<>(); + overwriteMetadata.put("newKey", "newValue"); + + // Overwrite the previously created key with new metadata + OMRequest overwriteRequestWithMetadata = + createKeyRequest(false, 0, keyName, overwriteMetadata); + OMKeyCreateRequest overwriteOmKeyCreateRequest = + new OMKeyCreateRequest(overwriteRequestWithMetadata, getBucketLayout()); + + // Perform the overwrite operation and capture the response + OMClientResponse overwriteResponse = + overwriteOmKeyCreateRequest.validateAndUpdateCache(ozoneManager, 101L); + // Verify the new metadata is correctly applied in the response + verifyMetadataInResponse(overwriteResponse, overwriteMetadata); + } + + + private void verifyMetadataInResponse(OMClientResponse response, + Map expectedMetadata) { + // Extract metadata from the response + List metadataList = + response.getOMResponse().getCreateKeyResponse().getKeyInfo() + .getMetadataList(); + assertEquals(expectedMetadata.size(), metadataList.size()); + metadataList.forEach(kv -> { + String expectedValue = expectedMetadata.get(kv.getKey()); + assertEquals(expectedValue, kv.getValue(), + "Metadata value mismatch for key: " + kv.getKey()); + }); + } + /** * This method calls preExecute and verify the modified request. 
* @param originalOMRequest @@ -543,25 +648,55 @@ protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber) { private OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, String keyName) { + return createKeyRequest(isMultipartKey, partNumber, keyName, null); + } + /** + * Create OMRequest which encapsulates a CreateKeyRequest, optionally + * with metadata. + * + * @param isMultipartKey Indicates if the key is part of a multipart upload. + * @param partNumber The part number for multipart uploads, ignored if + * isMultipartKey is false. + * @param keyName The name of the key to create or update. + * @param metadata Optional metadata for the key. Pass null or an empty + * map if no metadata is to be set. + * @return OMRequest configured with the provided parameters. + */ + protected OMRequest createKeyRequest(boolean isMultipartKey, int partNumber, + String keyName, + Map metadata) { KeyArgs.Builder keyArgs = KeyArgs.newBuilder() - .setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setIsMultipartKey(isMultipartKey) - .setFactor(((RatisReplicationConfig) replicationConfig).getReplicationFactor()) + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setIsMultipartKey(isMultipartKey) + .setFactor( + ((RatisReplicationConfig) replicationConfig).getReplicationFactor()) .setType(replicationConfig.getReplicationType()) .setLatestVersionLocation(true); + // Configure for multipart upload, if applicable if (isMultipartKey) { keyArgs.setDataSize(dataSize).setMultipartNumber(partNumber); } + // Include metadata, if provided + if (metadata != null && !metadata.isEmpty()) { + metadata.forEach((key, value) -> keyArgs.addMetadata(KeyValue.newBuilder() + .setKey(key) + .setValue(value) + .build())); + } + OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest = CreateKeyRequest.newBuilder().setKeyArgs(keyArgs).build(); return OMRequest.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.CreateKey) .setClientId(UUID.randomUUID().toString()) - .setCreateKeyRequest(createKeyRequest).build(); + .setCreateKeyRequest(createKeyRequest) + .build(); } private OMRequest createKeyRequest( From 4095ef1f481f716d5b3ecc57424fe332ffda4420 Mon Sep 17 00:00:00 2001 From: Zhaohui Wang <32935220+wzhallright@users.noreply.github.com> Date: Wed, 28 Feb 2024 22:39:08 +0800 Subject: [PATCH 077/108] HDDS-10278. 
Simplify tests using assertDoesNotThrow (#6291) --- .../TestReplicationSupervisor.java | 16 ++++------- .../hdds/utils/db/TestRDBTableStore.java | 8 ++---- .../hdds/utils/db/TestTypedRDBTableStore.java | 8 ++---- .../TestDirectoryDeletingServiceWithFSO.java | 25 ++++++++--------- .../hadoop/fs/ozone/TestRootedDDSWithFSO.java | 25 +++++++---------- .../hadoop/ozone/TestSecureOzoneCluster.java | 22 ++++----------- .../hadoop/ozone/om/TestScmSafeMode.java | 7 +---- .../ozone/om/TestSnapshotDeletingService.java | 18 ++++++------ .../TestSnapshotDirectoryCleaningService.java | 25 +++++++---------- ...estReconInsightsForDeletedDirectories.java | 28 +++++++++---------- .../ozone/om/TestAuthorizerLockImpl.java | 9 ++---- ...zoneManagerDoubleBufferWithOMResponse.java | 11 +++----- .../volume/TestOMVolumeDeleteResponse.java | 15 ++-------- .../om/service/TestKeyDeletingService.java | 17 ++++++----- .../hadoop/ozone/recon/api/TestEndpoints.java | 7 ++--- .../recon/api/TestOpenContainerCount.java | 26 ++++++++--------- 16 files changed, 99 insertions(+), 168 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index 26c6853b64a6..f42d6afd6814 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -79,8 +79,8 @@ import static org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status.DONE; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.fromSources; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; import static org.mockito.Mockito.any; @@ -478,11 +478,8 @@ private static class BlockingTask extends AbstractReplicationTask { @Override public void runTask() { runningLatch.countDown(); - try { - waitForCompleteLatch.await(); - } catch (InterruptedException e) { - fail("Interrupted waiting for the completion latch to be released"); - } + assertDoesNotThrow(() -> waitForCompleteLatch.await(), + "Interrupted waiting for the completion latch to be released"); setStatus(DONE); } } @@ -607,13 +604,10 @@ public void replicate(ReplicationTask task) { UUID.randomUUID().toString(), UUID.randomUUID().toString()); KeyValueContainer kvc = new KeyValueContainer(kvcd, conf); - - try { + assertDoesNotThrow(() -> { set.addContainer(kvc); task.setStatus(DONE); - } catch (Exception e) { - fail("Unexpected error: " + e.getMessage()); - } + }); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java index 504c3dd47f32..8095c1cbb1f4 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java @@ -48,6 +48,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -55,7 +56,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; /** * Tests for RocksDBTable Store. @@ -96,11 +96,7 @@ public static void initConstants() { private static boolean consume(Table.KeyValue keyValue) { count++; - try { - assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - fail("Unexpected Exception " + ex); - } + assertNotNull(assertDoesNotThrow(keyValue::getKey)); return true; } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java index 9e16ebb99e19..f437d6518c5f 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java @@ -21,6 +21,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -29,7 +30,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.File; @@ -217,11 +217,7 @@ public void batchDelete() throws Exception { private static boolean consume(Table.KeyValue keyValue) { count++; - try { - assertNotNull(keyValue.getKey()); - } catch (IOException ex) { - fail(ex.toString()); - } + assertNotNull(assertDoesNotThrow(keyValue::getKey)); return true; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 87f114bd7115..382f4b72034c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -55,13 +55,14 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.LongSupplier; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static 
org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; @@ -126,15 +127,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @Test @@ -534,16 +533,14 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + + return count.get() == expectedCount; } private void checkPath(Path path) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java index 074a8e7df4ba..de3358685ec4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java @@ -47,9 +47,9 @@ import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; -import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.fs.ozone.TestDirectoryDeletingServiceWithFSO.assertSubPathsCount; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -58,10 +58,10 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.fail; /** * Directory deletion service test cases using rooted ozone filesystem @@ -128,15 +128,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), false); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @Test @@ -227,16 +225,13 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = 
cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private static BucketLayout getFSOBucketLayout() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 1be5b64ac87d..bc49d176da9a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -179,7 +179,6 @@ import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; import static org.mockito.Mockito.mock; @@ -1346,27 +1345,16 @@ void testOMGrpcServerCertificateRenew() throws Exception { } // get new client, it should succeed. - try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } + OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); + client1.close(); + // Wait for old OM certificate to expire GenericTestUtils.waitFor(() -> omCert.getNotAfter().before(new Date()), 500, certLifetime * 1000); // get new client, it should succeed too. - try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } + OzoneClient client2 = OzoneClientFactory.getRpcClient(conf); + client2.close(); } finally { OzoneManager.setUgi(null); GrpcOmTransport.setCaCerts(null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index d5c2c64c208a..427d3bf8a482 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -349,12 +349,7 @@ public void testSCMSafeModeDisabled() throws Exception { public void testCreateRetryWhileSCMSafeMode() throws Exception { // Test1: Test safe mode when there are no containers in system. 
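Editor's note (not part of the patch): the hunks in this commit all apply the same mechanical refactoring. A try/catch block that called fail() on an unexpected exception is replaced with JUnit 5's assertDoesNotThrow, and any value assigned inside the lambda is captured in an AtomicLong because a lambda cannot write to a plain local variable. A minimal, self-contained sketch of the pattern follows; the table type and method names are placeholders, not taken from the Ozone code base.

    import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    class RowCountAssertionSketch {

      /** Stand-in for the metadata-manager table used by the real tests. */
      interface FakeTable {
        long countRows() throws IOException;
      }

      // Before: try { count = table.countRows(); } catch (IOException ex) { fail(...); }
      // After: a checked exception is turned into an assertion failure automatically.
      static boolean hasExpectedRowCount(long expected, FakeTable table) {
        AtomicLong count = new AtomicLong();
        assertDoesNotThrow(() -> count.set(table.countRows()));
        return count.get() == expected;
      }
    }

JUnit 5 also offers a supplier overload, long n = assertDoesNotThrow(table::countRows), which returns the computed value directly and would make the AtomicLong unnecessary; the patch uses the AtomicLong form, with the log statement kept inside the guarded block.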
cluster.stop(); - - try { - cluster = builder.build(); - } catch (IOException e) { - fail("Cluster startup failed."); - } + cluster = builder.build(); final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java index 9f697d4148b3..d4bf911676ab 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDeletingService.java @@ -53,14 +53,15 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test Snapshot Deleting Service. @@ -553,15 +554,12 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java index 6b39b76c5466..893e248d88c5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java @@ -46,17 +46,17 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; /** * Test Snapshot Directory Service. 
@@ -114,15 +114,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } @SuppressWarnings("checkstyle:LineLength") @@ -258,15 +256,12 @@ private void assertTableRowCount(Table table, int count) private boolean assertTableRowCount(int expectedCount, Table table) { - long count = 0L; - try { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(cluster.getOzoneManager().getMetadataManager().countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 4c059be1b542..ca8fcae6643b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -54,6 +54,7 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -61,6 +62,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.fail; @@ -127,15 +129,13 @@ public static void teardown() { @AfterEach public void cleanup() { - try { + assertDoesNotThrow(() -> { Path root = new Path("/"); FileStatus[] fileStatuses = fs.listStatus(root); for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex) { - fail("Failed to cleanup files."); - } + }); } /** @@ -461,21 +461,19 @@ private void assertTableRowCount(Table table, int expectedCount, private boolean assertTableRowCount(int expectedCount, Table table, boolean isRecon) { - long count = 0L; - try { + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { if (isRecon) { - count = cluster.getReconServer().getOzoneManagerServiceProvider() - .getOMMetadataManagerInstance().countRowsInTable(table); + count.set(cluster.getReconServer().getOzoneManagerServiceProvider() + .getOMMetadataManagerInstance().countRowsInTable(table)); } else { - count = cluster.getOzoneManager().getMetadataManager() - .countRowsInTable(table); + 
count.set(cluster.getOzoneManager().getMetadataManager() + .countRowsInTable(table)); } LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("Test failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private void syncDataFromOM() { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java index b78864e30105..f600158007b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestAuthorizerLockImpl.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.om; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLock; import org.apache.hadoop.ozone.om.multitenant.AuthorizerLockImpl; import org.apache.ozone.test.GenericTestUtils; @@ -103,17 +103,14 @@ public void testStampedLockBehavior() throws InterruptedException { public void testLockInOneThreadUnlockInAnother() { final AuthorizerLock authorizerLock = new AuthorizerLockImpl(); - - try { + assertDoesNotThrow(() -> { authorizerLock.tryWriteLockInOMRequest(); // Spawn another thread to release the lock. // Works as long as they share the same AuthorizerLockImpl instance. 
final Thread thread1 = new Thread(authorizerLock::unlockWriteInOMRequest); thread1.start(); - } catch (IOException e) { - fail("Should not have thrown: " + e.getMessage()); - } + }); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index a97b24289cd7..d0e814a78265 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -64,6 +64,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.newBucketInfoBuilder; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -416,13 +417,9 @@ private void testDoubleBuffer(int volumeCount, int bucketsPerVolume) } private boolean assertRowCount(int expected, Table table) { - long count = 0L; - try { - count = omMetadataManager.countRowsInTable(table); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expected; + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> count.set(omMetadataManager.countRowsInTable(table))); + return count.get() == expected; } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java index 9ae0a395e906..70dd23a7b047 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/volume/TestOMVolumeDeleteResponse.java @@ -36,12 +36,11 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import java.io.IOException; import java.util.UUID; import java.nio.file.Path; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.fail; /** * This class tests OMVolumeCreateResponse. 
@@ -115,7 +114,7 @@ public void testAddToDBBatch() throws Exception { } @Test - public void testAddToDBBatchNoOp() throws Exception { + public void testAddToDBBatchNoOp() { OMResponse omResponse = OMResponse.newBuilder() .setCmdType(OzoneManagerProtocolProtos.Type.DeleteVolume) @@ -126,15 +125,7 @@ public void testAddToDBBatchNoOp() throws Exception { OMVolumeDeleteResponse omVolumeDeleteResponse = new OMVolumeDeleteResponse( omResponse); - - try { - omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, - batchOperation); - } catch (IOException ex) { - fail("testAddToDBBatchFailure failed"); - } - + assertDoesNotThrow(() -> omVolumeDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation)); } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index d745a01e62e8..c5099fc75919 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -31,6 +31,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -81,10 +82,10 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.fail; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -626,15 +627,13 @@ private static void assertTableRowCount(Table table, private static boolean assertTableRowCount(long expectedCount, Table table, OMMetadataManager metadataManager) { - long count = 0L; - try { - count = metadataManager.countRowsInTable(table); + AtomicLong count = new AtomicLong(0L); + assertDoesNotThrow(() -> { + count.set(metadataManager.countRowsInTable(table)); LOG.info("{} actual row count={}, expectedCount={}", table.getName(), - count, expectedCount); - } catch (IOException ex) { - fail("testDoubleBuffer failed with: " + ex); - } - return count == expectedCount; + count.get(), expectedCount); + }); + return count.get() == expectedCount; } private void createVolumeAndBucket(String volumeName, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index f7deaddd4fb1..9c92ad4d7e1f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -113,6 +113,7 @@ import static org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl.PROMETHEUS_INSTANT_QUERY_API; import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS; import static 
org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -406,7 +407,7 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto4).build(); LayoutVersionProto layoutInfo = defaultLayoutVersionProto(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() .register(extendedDatanodeDetailsProto, nodeReportProto, containerReportsProto, pipelineReportsProto, layoutInfo); @@ -417,9 +418,7 @@ public void setUp() throws Exception { defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); // Write Data to OM // A sample volume (sampleVol) and a bucket (bucketOne) is already created // in AbstractOMMetadataManagerTest. diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java index 8a9452a86297..f64d93707a2c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java @@ -67,8 +67,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyString; @@ -329,16 +329,14 @@ public void setUp() throws Exception { .addStorageReport(storageReportProto1) .addStorageReport(storageReportProto2).build(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() - .register(extendedDatanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto, - defaultLayoutVersionProto()); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto, + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); } @Test @@ -421,16 +419,14 @@ private void updateContainerReport(long containerId) { .setOriginNodeId(datanodeId) .build()) .build(); - try { + assertDoesNotThrow(() -> { reconScm.getDatanodeProtocolServer() - .register(extendedDatanodeDetailsProto, nodeReportProto, - containerReportsProto, pipelineReportsProto, - defaultLayoutVersionProto()); + .register(extendedDatanodeDetailsProto, nodeReportProto, + containerReportsProto, pipelineReportsProto, + defaultLayoutVersionProto()); // Process all events in the event queue reconScm.getEventQueue().processAll(1000); - } catch (Exception ex) { - fail(ex.getMessage()); - } + }); } private void waitAndCheckConditionAfterHeartbeat(Callable check) From 8fcd039cefd1e03b32d63a35034751e19d1b5cbb Mon Sep 17 00:00:00 2001 From: Tejaskriya 
<87555809+Tejaskriya@users.noreply.github.com> Date: Wed, 28 Feb 2024 20:37:35 +0530 Subject: [PATCH 078/108] HDDS-10437. Rename method to getContainersPendingReplication (#6293) --- .../apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java | 2 +- .../hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java | 3 ++- .../hadoop/hdds/scm/node/NodeDecommissionManager.java | 4 ++-- .../hadoop/hdds/scm/server/SCMClientProtocolServer.java | 2 +- .../hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java | 8 ++++---- .../scm/cli/datanode/DecommissionStatusSubCommand.java | 6 +++--- .../cli/datanode/TestDecommissionStatusSubCommand.java | 2 +- 7 files changed, 14 insertions(+), 13 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java index fbfbb49c2521..7b10f60a5755 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitor.java @@ -35,6 +35,6 @@ public interface DatanodeAdminMonitor extends Runnable { void stopMonitoring(DatanodeDetails dn); Set getTrackedNodes(); void setMetrics(NodeDecommissionMetrics metrics); - Map> getContainersReplicatedOnNode(DatanodeDetails dn) + Map> getContainersPendingReplication(DatanodeDetails dn) throws NodeNotFoundException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java index d7975ff1e58e..23bf41dc83e8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java @@ -494,7 +494,8 @@ private boolean checkContainersReplicatedOnNode(TrackedNode dn) return underReplicated == 0 && unclosed == 0; } - public Map> getContainersReplicatedOnNode(DatanodeDetails dn) { + @Override + public Map> getContainersPendingReplication(DatanodeDetails dn) { Iterator iterator = trackedNodes.iterator(); while (iterator.hasNext()) { TrackedNode trackedNode = iterator.next(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index 38e59b89e767..4ace6d22d51c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -294,9 +294,9 @@ public NodeDecommissionManager(OzoneConfiguration config, NodeManager nm, TimeUnit.SECONDS); } - public Map> getContainersReplicatedOnNode(DatanodeDetails dn) + public Map> getContainersPendingReplication(DatanodeDetails dn) throws NodeNotFoundException { - return getMonitor().getContainersReplicatedOnNode(dn); + return getMonitor().getContainersPendingReplication(dn); } @VisibleForTesting diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index faee4fcaaab7..2df2a4847e36 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -592,7 +592,7 @@ public void deleteContainer(long containerID) throws IOException { @Override public Map> getContainersOnDecomNode(DatanodeDetails dn) throws IOException { try { - return scm.getScmDecommissionManager().getContainersReplicatedOnNode(dn); + return scm.getScmDecommissionManager().getContainersPendingReplication(dn); } catch (NodeNotFoundException e) { throw new IOException("Failed to get containers list. Unable to find required node", e); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 06565e1b7e5a..5c04ad63210e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -864,8 +864,8 @@ public void testContainersReplicatedOnDecomDnAPI() assertEquals(1, monitor.getTrackedNodeCount()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dn1).getOperationalState()); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 2); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 0); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnderReplicated").size(), 2); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnClosed").size(), 0); DatanodeAdminMonitorTestUtil .mockGetContainerReplicaCount(repManager, @@ -877,8 +877,8 @@ public void testContainersReplicatedOnDecomDnAPI() assertEquals(1, monitor.getTrackedNodeCount()); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(dn1).getOperationalState()); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnderReplicated").size(), 0); - assertEquals(monitor.getContainersReplicatedOnNode(dn1).get("UnClosed").size(), 2); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnderReplicated").size(), 0); + assertEquals(monitor.getContainersPendingReplication(dn1).get("UnClosed").size(), 2); } /** diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java index 17d577ff2dc7..464b08099db7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionStatusSubCommand.java @@ -124,12 +124,12 @@ private void printCounts(DatanodeDetails datanode, JsonNode counts, int numDecom double underReplicated = Double.parseDouble(counts.get("UnderReplicatedDN." + i).toString()); double unclosed = Double.parseDouble(counts.get("UnclosedContainersDN." + i).toString()); long startTime = Long.parseLong(counts.get("StartTimeDN." + i).toString()); - System.out.print("Decommission started at : "); + System.out.print("Decommission Started At : "); Date date = new Date(startTime); DateFormat formatter = new SimpleDateFormat("dd/MM/yyyy hh:mm:ss z"); System.out.println(formatter.format(date)); - System.out.println("No. 
of Pipelines: " + pipelines); - System.out.println("No. of UnderReplicated containers: " + underReplicated); + System.out.println("No. of Unclosed Pipelines: " + pipelines); + System.out.println("No. of UnderReplicated Containers: " + underReplicated); System.out.println("No. of Unclosed Containers: " + unclosed); return; } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java index ad0323d334e6..fce593ab8c35 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDecommissionStatusSubCommand.java @@ -96,7 +96,7 @@ public void testSuccessWhenDecommissionStatus() throws IOException { p = Pattern.compile("Datanode:\\s.*host1\\)"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); - p = Pattern.compile("No\\. of Pipelines:"); + p = Pattern.compile("No\\. of Unclosed Pipelines:"); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); assertTrue(m.find()); // metrics for both are shown From 01f8d62fbee2be4c79c4b4d4bf759a81f5ea30bb Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Wed, 28 Feb 2024 22:10:30 +0530 Subject: [PATCH 079/108] HDDS-10331. Rename Java constants of ex-DFS config keys (#6290) --- .../hadoop/hdds/scm/XceiverClientGrpc.java | 4 +- .../hadoop/hdds/scm/XceiverClientRatis.java | 4 +- .../hadoop/hdds/conf/OzoneConfiguration.java | 60 +++--- .../apache/hadoop/hdds/ratis/RatisHelper.java | 4 +- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 74 +++---- .../apache/hadoop/ozone/OzoneConfigKeys.java | 186 +++++++++--------- .../transport/server/XceiverServerGrpc.java | 8 +- .../server/ratis/ContainerStateMachine.java | 12 +- .../server/ratis/XceiverServerRatis.java | 108 +++++----- .../container/keyvalue/KeyValueHandler.java | 4 +- .../keyvalue/impl/ChunkManagerFactory.java | 4 +- .../container/common/ContainerTestUtils.java | 2 +- .../ozone/container/common/SCMTestUtils.java | 4 +- .../common/TestDatanodeStateMachine.java | 12 +- .../TestDatanodeConfiguration.java | 2 +- .../volume/TestPeriodicVolumeChecker.java | 2 +- .../common/volume/TestVolumeSet.java | 2 +- .../volume/TestVolumeSetDiskChecks.java | 2 +- .../ozoneimpl/TestContainerReader.java | 2 +- .../ozoneimpl/TestOzoneContainer.java | 2 +- .../TestDatanodeUpgradeToSchemaV3.java | 4 +- .../hadoop/hdds/utils/HddsServerUtil.java | 6 +- .../hdds/scm/node/TestContainerPlacement.java | 2 +- .../ozone/container/common/TestEndPoint.java | 18 +- .../scm/cli/ContainerOperationClient.java | 4 +- .../hadoop/ozone/MiniOzoneChaosCluster.java | 8 +- .../TestOzoneFileSystemWithStreaming.java | 4 +- .../hdds/scm/TestRatisPipelineLeader.java | 4 +- .../apache/hadoop/ozone/RatisTestHelper.java | 6 +- .../hadoop/ozone/TestMiniOzoneCluster.java | 18 +- .../hadoop/ozone/UniformDatanodesFactory.java | 24 +-- .../client/rpc/TestContainerStateMachine.java | 2 +- .../TestContainerStateMachineFailures.java | 10 +- .../TestContainerStateMachineFlushDelay.java | 2 +- .../rpc/TestContainerStateMachineStream.java | 2 +- .../rpc/TestFailureHandlingByClient.java | 2 +- ...TestFailureHandlingByClientFlushDelay.java | 2 +- .../TestMultiBlockWritesWithDnFailures.java | 2 +- .../server/ratis/TestCSMMetrics.java | 4 +- 
.../metrics/TestContainerMetrics.java | 2 +- .../ozoneimpl/TestOzoneContainer.java | 4 +- .../ozoneimpl/TestSecureOzoneContainer.java | 4 +- .../container/server/TestContainerServer.java | 10 +- .../server/TestSecureContainerServer.java | 10 +- .../ozone/dn/ratis/TestDnRatisLogParser.java | 2 +- .../hadoop/ozone/shell/TestScmAdminHA.java | 2 +- .../fs/ozone/BasicOzoneClientAdapterImpl.java | 4 +- .../BasicRootedOzoneClientAdapterImpl.java | 4 +- .../ozone/s3/endpoint/ObjectEndpoint.java | 8 +- .../s3/endpoint/TestPartUploadWithStream.java | 2 +- .../s3/endpoint/TestUploadWithStream.java | 2 +- 51 files changed, 338 insertions(+), 338 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 0a38e6604897..52f435dc826d 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -166,8 +166,8 @@ private synchronized void connectToDatanode(DatanodeDetails dn) // port. int port = dn.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue(); if (port == 0) { - port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + port = config.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } // Add credential context to the client call diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index aff0aa966a79..58a2153352a4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -83,8 +83,8 @@ public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, ConfigurationSource ozoneConf, ClientTrustManager trustManager) { final String rpcType = ozoneConf - .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + .get(ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new SecurityConfig(ozoneConf), trustManager); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index ed897f898c0b..e324a63d3ba0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -326,65 +326,65 @@ private static void addDeprecatedKeys() { new DeprecationDelta("hdds.datanode.replication.work.dir", OZONE_CONTAINER_COPY_WORKDIR), new DeprecationDelta("dfs.container.chunk.write.sync", - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY), + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY), new DeprecationDelta("dfs.container.ipc", - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT), + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT), new DeprecationDelta("dfs.container.ipc.random.port", - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT), + 
OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT), new DeprecationDelta("dfs.container.ratis.admin.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT), new DeprecationDelta("dfs.container.ratis.datanode.storage.dir", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR), new DeprecationDelta("dfs.container.ratis.datastream.enabled", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED), new DeprecationDelta("dfs.container.ratis.datastream.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT), new DeprecationDelta("dfs.container.ratis.datastream.random.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT), new DeprecationDelta("dfs.container.ratis.enabled", - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY), new DeprecationDelta("dfs.container.ratis.ipc", - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT), new DeprecationDelta("dfs.container.ratis.ipc.random.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT), new DeprecationDelta("dfs.container.ratis.leader.pending.bytes.limit", - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT), new DeprecationDelta("dfs.container.ratis.log.appender.queue.byte-limit", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT), new DeprecationDelta("dfs.container.ratis.log.appender.queue.num-elements", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS), new DeprecationDelta("dfs.container.ratis.log.purge.gap", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP), new DeprecationDelta("dfs.container.ratis.log.queue.byte-limit", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT), new DeprecationDelta("dfs.container.ratis.log.queue.num-elements", - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS), new DeprecationDelta("dfs.container.ratis.num.container.op.executors", - ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY), new DeprecationDelta("dfs.container.ratis.num.write.chunk.threads.per.volume", - ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), + ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME), new DeprecationDelta("dfs.container.ratis.replication.level", - ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY), new DeprecationDelta("dfs.container.ratis.rpc.type", - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY), new DeprecationDelta("dfs.container.ratis.segment.preallocated.size", - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), + 
ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY), new DeprecationDelta("dfs.container.ratis.segment.size", - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY), new DeprecationDelta("dfs.container.ratis.server.port", - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT), + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT), new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.retries", - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES), new DeprecationDelta("dfs.container.ratis.statemachinedata.sync.timeout", - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT), new DeprecationDelta("dfs.container.ratis.statemachine.max.pending.apply-transactions", - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS), new DeprecationDelta("dfs.ratis.leader.election.minimum.timeout.duration", - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY), new DeprecationDelta("dfs.ratis.server.retry-cache.timeout.duration", - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY), new DeprecationDelta("dfs.ratis.snapshot.threshold", - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY) + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY) }); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index cb7f6f8a3b31..bcea4d0193bd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -234,8 +234,8 @@ public static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, private static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } public static BiFunction newRaftClient( diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index e093a45af03d..d8fdbc1063a9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -41,95 +41,95 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_DB_DIRS_PERMISSIONS = "ozone.scm.db.dirs.permissions"; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY = "hdds.container.ratis.enabled"; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY = "hdds.container.ratis.rpc.type"; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + 
public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT = "GRPC"; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME = "hdds.container.ratis.num.write.chunk.threads.per.volume"; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY = "hdds.container.ratis.replication.level"; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT = ReplicationLevel.MAJORITY; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY = "hdds.container.ratis.num.container.op.executors"; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY = "hdds.container.ratis.segment.size"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT = "64MB"; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY = "hdds.container.ratis.segment.preallocated.size"; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = "4MB"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = "hdds.container.ratis.statemachinedata.sync.timeout"; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = TimeDuration.valueOf(10, TimeUnit.SECONDS); public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = "hdds.container.ratis.statemachinedata.sync.retries"; public static final String - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS = "hdds.container.ratis.statemachine.max.pending.apply-transactions"; // The default value of maximum number of pending state machine apply // transactions is kept same as default snapshot threshold. 
public static final int - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT = 100000; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = "hdds.container.ratis.log.queue.num-elements"; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = "hdds.container.ratis.log.queue.byte-limit"; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = "4GB"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = "hdds.container.ratis.log.appender.queue.num-elements"; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = "hdds.container.ratis.log.appender.queue.byte-limit"; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = "hdds.container.ratis.log.purge.gap"; // TODO: Set to 1024 once RATIS issue around purge is fixed. 
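Editor's note (not part of the patch): this commit only renames the Java identifiers from DFS_* to HDDS_*; the property strings themselves (for example "hdds.container.ipc.port") are unchanged, and the dfs.* to hdds.* property-name deprecations registered through DeprecationDelta in OzoneConfiguration.addDeprecatedKeys(), updated earlier in this patch, continue to apply. A small sketch of that behavior, assuming the deltas are registered when OzoneConfiguration is loaded, as the existing addDeprecatedKeys() method suggests:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public class LegacyConfigKeySketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // A value supplied under the legacy dfs.* name ...
        conf.set("dfs.container.ipc", "9999");
        // ... remains visible through the renamed constant, whose value is the
        // hdds.* property name, because Hadoop's Configuration resolves deprecated keys.
        int port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT,
            OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT);
        System.out.println(port); // expected: 9999
      }
    }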
- public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = "hdds.container.ratis.leader.pending.bytes.limit"; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = "hdds.ratis.server.retry-cache.timeout.duration"; public static final TimeDuration - DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = "hdds.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(5, TimeUnit.SECONDS); - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = "hdds.ratis.snapshot.threshold"; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000; // TODO : this is copied from OzoneConsts, may need to move to a better place public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 300711000934..0080686575ba 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -36,9 +36,9 @@ @InterfaceAudience.Public @InterfaceStability.Unstable public final class OzoneConfigKeys { - public static final String DFS_CONTAINER_IPC_PORT = + public static final String HDDS_CONTAINER_IPC_PORT = "hdds.container.ipc.port"; - public static final int DFS_CONTAINER_IPC_PORT_DEFAULT = 9859; + public static final int HDDS_CONTAINER_IPC_PORT_DEFAULT = 9859; public static final String OZONE_METADATA_DIRS = "ozone.metadata.dirs"; @@ -56,52 +56,52 @@ public final class OzoneConfigKeys { * so that a mini cluster is able to launch multiple containers on a node. * * When set to false (default), the container port will be specified as - * {@link #DFS_CONTAINER_IPC_PORT} and the default value will be specified - * as {@link #DFS_CONTAINER_IPC_PORT_DEFAULT}. + * {@link #HDDS_CONTAINER_IPC_PORT} and the default value will be specified + * as {@link #HDDS_CONTAINER_IPC_PORT_DEFAULT}. 
*/ - public static final String DFS_CONTAINER_IPC_RANDOM_PORT = + public static final String HDDS_CONTAINER_IPC_RANDOM_PORT = "hdds.container.ipc.random.port"; - public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = + public static final boolean HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT = "hdds.container.ratis.datastream.random.port"; public static final boolean - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT = false; - public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY = + public static final String HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY = "hdds.container.chunk.write.sync"; - public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; + public static final boolean HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false; /** * Ratis Port where containers listen to. */ - public static final String DFS_CONTAINER_RATIS_IPC_PORT = + public static final String HDDS_CONTAINER_RATIS_IPC_PORT = "hdds.container.ratis.ipc.port"; - public static final int DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; + public static final int HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT = 9858; /** * Ratis Port where containers listen to admin requests. */ - public static final String DFS_CONTAINER_RATIS_ADMIN_PORT = + public static final String HDDS_CONTAINER_RATIS_ADMIN_PORT = "hdds.container.ratis.admin.port"; - public static final int DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; + public static final int HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT = 9857; /** * Ratis Port where containers listen to server-to-server requests. */ - public static final String DFS_CONTAINER_RATIS_SERVER_PORT = + public static final String HDDS_CONTAINER_RATIS_SERVER_PORT = "hdds.container.ratis.server.port"; - public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; + public static final int HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856; /** * Ratis Port where containers listen to datastream requests. */ - public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLED + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED = "hdds.container.ratis.datastream.enabled"; - public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT + public static final boolean HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT = false; - public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT + public static final String HDDS_CONTAINER_RATIS_DATASTREAM_PORT = "hdds.container.ratis.datastream.port"; - public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT + public static final int HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT = 9855; /** @@ -133,9 +133,9 @@ public final class OzoneConfigKeys { * When set to true, allocate a random free port for ozone container, so that * a mini cluster is able to launch multiple containers on a node. 
*/ - public static final String DFS_CONTAINER_RATIS_IPC_RANDOM_PORT = + public static final String HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT = "hdds.container.ratis.ipc.random.port"; - public static final boolean DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = + public static final boolean HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT = false; public static final String OZONE_METADATA_STORE_ROCKSDB_STATISTICS = @@ -325,97 +325,97 @@ public final class OzoneConfigKeys { public static final int OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT = 10; - public static final String DFS_CONTAINER_RATIS_ENABLED_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; - public static final boolean DFS_CONTAINER_RATIS_ENABLED_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY; - public static final String DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_ENABLED_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY; + public static final boolean HDDS_CONTAINER_RATIS_ENABLED_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY; + public static final String HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT; public static final String - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME; public static final int - DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT = ScmConfigKeys. 
- DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; - public static final String DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; + HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_KEY; public static final ReplicationLevel - DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; - public static final String DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; - public static final int DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; - public static final String DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; - public static final String DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY - = ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; + HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_REPLICATION_LEVEL_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY; + public static final int HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY; public static final String - DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; + HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT; // config settings to enable stateMachineData write timeout public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT; public static final TimeDuration - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = + public static final String HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR = "hdds.container.ratis.datanode.storage.dir"; - public static final String DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; + public static final String HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY; public static final TimeDuration - 
DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; + HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT; public static final String - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = - ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; - public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; - public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES = + ScmConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS; + public static final int HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT; + public static final String HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; public static final int - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; public static final String - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LOG_PURGE_GAP = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP; - public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; - public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LOG_PURGE_GAP = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP; + public static final int HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT; + public static final String HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT = 
+ ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT; public static final String - DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = - ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; + HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = + ScmConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT; public static final String - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; public static final TimeDuration - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = - ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; - public static final String DFS_RATIS_SNAPSHOT_THRESHOLD_KEY = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY; - public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = - ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + ScmConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT; + public static final String HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY; + public static final long HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = + ScmConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT; public static final String HDDS_DATANODE_PLUGINS_KEY = "hdds.datanode.plugins"; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 009e6396e0d2..346b05ebb4c1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -99,11 +99,11 @@ public XceiverServerGrpc(DatanodeDetails datanodeDetails, this.id = datanodeDetails.getUuid(); this.datanodeDetails = datanodeDetails; - this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + this.port = conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); - if (conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { + if (conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT_DEFAULT)) { this.port = 0; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index e3c2913ec5af..fdbe8c981cb9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -219,8 +219,8 @@ public ContainerStateMachine(RaftGroupId gid, this.writeChunkFutureMap = new ConcurrentHashMap<>(); applyTransactionCompletionMap = new ConcurrentHashMap<>(); long 
pendingRequestsBytesLimit = (long)conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); // cache with FIFO eviction, and if element not found, this needs // to be obtained from disk for slow follower @@ -238,13 +238,13 @@ public ContainerStateMachine(RaftGroupId gid, this.container2BCSIDMap = new ConcurrentHashMap<>(); final int numContainerOpExecutors = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_DEFAULT); int maxPendingApplyTransactions = conf.getInt( ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS, ScmConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); + HDDS_CONTAINER_RATIS_STATEMACHINE_MAX_PENDING_APPLY_TXNS_DEFAULT); applyTransactionSemaphore = new Semaphore(maxPendingApplyTransactions); stateMachineHealthy = new AtomicBoolean(true); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index fcc611ea3f10..53ae98f50c01 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -110,12 +110,12 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY; import static org.apache.ratis.util.Preconditions.assertTrue; /** @@ -189,8 +189,8 @@ private XceiverServerRatis(DatanodeDetails dd, ratisServerConfig = 
conf.getObject(DatanodeRatisServerConfig.class); assignPorts(); this.streamEnable = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); RaftProperties serverProperties = newRaftProperties(); this.context = context; this.dispatcher = dispatcher; @@ -217,17 +217,17 @@ private XceiverServerRatis(DatanodeDetails dd, private void assignPorts() { clientPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); if (DatanodeVersion.fromProtoValue(datanodeDetails.getInitialVersion()) .compareTo(SEPARATE_RATIS_PORTS_AVAILABLE) >= 0) { adminPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT_DEFAULT); serverPort = determinePort( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT_DEFAULT); } else { adminPort = clientPort; serverPort = clientPort; @@ -236,8 +236,8 @@ private void assignPorts() { private int determinePort(String key, int defaultValue) { boolean randomPort = conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT_DEFAULT); return randomPort ? 0 : conf.getInt(key, defaultValue); } @@ -249,14 +249,14 @@ private ContainerStateMachine getStateMachine(RaftGroupId gid) { private void setUpRatisStream(RaftProperties properties) { // set the datastream config if (conf.getBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { + HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) { dataStreamPort = 0; } else { dataStreamPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); } RatisHelper.enableNettyStreaming(properties); NettyConfigKeys.DataStream.setPort(properties, dataStreamPort); @@ -327,8 +327,8 @@ public RaftProperties newRaftProperties() { } long snapshotThreshold = - conf.getLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, - OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); + conf.getLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, + OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT); RaftServerConfigKeys.Snapshot. setAutoTriggerEnabled(properties, true); RaftServerConfigKeys.Snapshot. 
@@ -338,11 +338,11 @@ public RaftProperties newRaftProperties() { setPendingRequestsLimits(properties); int logQueueNumElements = - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_NUM_ELEMENTS_DEFAULT); final long logQueueByteLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setQueueElementLimit( properties, logQueueNumElements); @@ -353,8 +353,8 @@ public RaftProperties newRaftProperties() { false); int purgeGap = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT); RaftServerConfigKeys.Log.setPurgeGap(properties, purgeGap); //Set the number of Snapshots Retained. @@ -375,12 +375,12 @@ private void setRatisLeaderElectionTimeout(RaftProperties properties) { long duration; TimeUnit leaderElectionMinTimeoutUnit = OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, OzoneConfigKeys. - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT .getDuration(), leaderElectionMinTimeoutUnit); final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf(duration, leaderElectionMinTimeoutUnit); @@ -396,11 +396,11 @@ private void setTimeoutForRetryCache(RaftProperties properties) { TimeUnit timeUnit; long duration; timeUnit = - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getUnit(); duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, - OzoneConfigKeys.DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT .getDuration(), timeUnit); final TimeDuration retryCacheTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -410,8 +410,8 @@ private void setTimeoutForRetryCache(RaftProperties properties) { private long setRaftSegmentPreallocatedSize(RaftProperties properties) { final long raftSegmentPreallocatedSize = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, StorageUnit.BYTES); RaftServerConfigKeys.Log.setPreallocatedSize(properties, SizeInBytes.valueOf(raftSegmentPreallocatedSize)); @@ -420,23 +420,23 @@ private long setRaftSegmentPreallocatedSize(RaftProperties properties) { private void 
setRaftSegmentAndWriteBufferSize(RaftProperties properties) { final int logAppenderQueueNumElements = conf.getInt( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, StorageUnit.BYTES); final long raftSegmentSize = (long) conf.getStorageSize( - DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, - DFS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY, + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_DEFAULT, StorageUnit.BYTES); final long raftSegmentBufferSize = logAppenderQueueByteLimit + 8; assertTrue(raftSegmentBufferSize <= raftSegmentSize, - () -> DFS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + () -> HDDS_CONTAINER_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT + " = " + logAppenderQueueByteLimit - + " must be <= (" + DFS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + + " must be <= (" + HDDS_CONTAINER_RATIS_SEGMENT_SIZE_KEY + " - 8" + " = " + (raftSegmentSize - 8) + ")"); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, @@ -454,11 +454,11 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true); TimeUnit timeUnit = OzoneConfigKeys. - DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit(); long duration = conf.getTimeDuration( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT, OzoneConfigKeys. 
- DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT + HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT .getDuration(), timeUnit); final TimeDuration dataSyncTimeout = TimeDuration.valueOf(duration, timeUnit); @@ -479,7 +479,7 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { int syncTimeoutRetryDefault = (int) nodeFailureTimeoutMs / dataSyncTimeout.toIntExact(TimeUnit.MILLISECONDS); int numSyncRetries = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_RETRIES, syncTimeoutRetryDefault); RaftServerConfigKeys.Log.StateMachineData.setSyncTimeoutRetry(properties, numSyncRetries); @@ -507,8 +507,8 @@ private void setStateMachineDataConfigurations(RaftProperties properties) { private RpcType setRpcType(RaftProperties properties) { final String rpcType = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, - OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); RatisHelper.setRpcType(properties, rpc); return rpc; @@ -517,8 +517,8 @@ private RpcType setRpcType(RaftProperties properties) { private void setPendingRequestsLimits(RaftProperties properties) { long pendingRequestsBytesLimit = (long) conf.getStorageSize( - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, - OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT, StorageUnit.BYTES); final int pendingRequestsMegaBytesLimit = HddsUtils.roundupMb(pendingRequestsBytesLimit); @@ -990,9 +990,9 @@ private static List createChunkExecutors( // TODO create single pool with N threads if using non-incremental chunks final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index c9d6672ee885..e35c6345683f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -164,9 +164,9 @@ public KeyValueHandler(ConfigurationSource config, // Requests. 
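Both createChunkExecutors above and the KeyValueHandler constructor that continues just below read the per-volume write-chunk thread setting and combine it with the number of configured data volumes. A rough plain-Java sketch of that sizing; the volume count here is a made-up example, since the real code derives it from hdds.datanode.dir via HddsServerUtil.getDatanodeStorageDirs, and the exact pool layout may differ.

// Rough sketch: the per-volume thread setting effectively scales with the
// number of data volumes. Numbers are illustrative, not read from config.
public class WriteChunkThreadSizingSketch {
  public static void main(String[] args) {
    int threadCountPerDisk = 10; // hdds.container.ratis.num.write.chunk.threads.per.volume default
    int numberOfDisks = 4;       // assumed number of configured data volumes
    int totalThreads = threadCountPerDisk * numberOfDisks;
    System.out.println("approximate write-chunk executor threads: " + totalThreads);
  }
}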
final int threadCountPerDisk = conf.getInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_DEFAULT); final int numberOfDisks = HddsServerUtil.getDatanodeStorageDirs(conf).size(); containerCreationLocks = Striped.lazyWeakLock( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index 1267ed786892..288a2d3e3312 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -51,8 +51,8 @@ private ChunkManagerFactory() { public static ChunkManager createChunkManager(ConfigurationSource conf, BlockManager manager, VolumeSet volSet) { boolean sync = - conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, - OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); + conf.getBoolean(OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_KEY, + OzoneConfigKeys.HDDS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); boolean persist = conf.getBoolean(HDDS_CONTAINER_PERSISTDATA, HDDS_CONTAINER_PERSISTDATA_DEFAULT); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 33bc4a851664..c63f82025e09 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -344,7 +344,7 @@ public static ContainerController getEmptyContainerController() { public static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); return XceiverServerRatis.newXceiverServerRatis(dn, conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index 7917a4ce55cd..21775245efb2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -155,8 +155,8 @@ public static HddsProtos.ReplicationFactor getReplicationFactor( private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 5738f5c1106e..e1e1ee9172a8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -82,12 +82,12 @@ void setUp() throws Exception { conf = SCMTestUtils.getConf(testRoot); conf.setTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, 500, TimeUnit.MILLISECONDS); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); serverAddresses = new ArrayList<>(); scmServers = new ArrayList<>(); mockServers = new ArrayList<>(); @@ -200,7 +200,7 @@ public void testDatanodeStateContext() throws IOException, DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = @@ -327,7 +327,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { DatanodeDetails datanodeDetails = getNewDatanodeDetails(); DatanodeDetails.Port port = DatanodeDetails.newPort( DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); datanodeDetails.setPort(port); try (DatanodeStateMachine stateMachine = diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index 565853c22dde..657afc38874a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -178,7 +178,7 @@ public void isCreatedWitDefaultValues() { public void testConf() throws Exception { final OzoneConfiguration conf = new OzoneConfiguration(); final String dir = "dummy/dir"; - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final DatanodeRatisServerConfig ratisConf = conf.getObject( DatanodeRatisServerConfig.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java index 3859cd47c9b9..46b8cc6772e8 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestPeriodicVolumeChecker.java @@ -59,7 +59,7 @@ public class TestPeriodicVolumeChecker { public void setup() throws IOException { conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, Files.createDirectory(folder.resolve("VolumeCheckerDir")).toString()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 1159d4277c78..68e687fefade 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -82,7 +82,7 @@ public void setup() throws Exception { volumes.add(volume1); volumes.add(volume2); conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dataDirKey); initializeVolumeSet(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index e3c610bfe47a..eb1f7979f8b9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -228,7 +228,7 @@ private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { for (int i = 0; i < numDirs; ++i) { metaDirs.add(new File(dir, randomAlphanumeric(10)).toString()); } - ozoneConf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + ozoneConf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", metaDirs)); final List dbDirs = new ArrayList<>(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 7f38eab785b8..8fd7b6280b62 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -378,7 +378,7 @@ public void testMultipleContainerReader(ContainerTestVersionInfo versionInfo) BlockUtils.shutdownCache(conf); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, datanodeDirs.toString()); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, datanodeDirs.toString()); MutableVolumeSet volumeSets = new MutableVolumeSet(datanodeId.toString(), clusterId, conf, null, diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 497418dcdcb9..07804c2a20bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -165,7 +165,7 @@ public void testBuildNodeReport(ContainerTestVersionInfo versionInfo) throws Exception { initTest(versionInfo); String path = folder.toString(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, String.join(",", path + "/ratis1", path + "/ratis2", path + "ratis3")); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 383e76dcc72a..23b7da263465 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -99,9 +99,9 @@ private void initTests(Boolean enable) throws Exception { conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED, schemaV3Enabled); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); setup(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 70d394e73b31..4fae3686c93c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -382,14 +382,14 @@ public static int getLogWarnInterval(ConfigurationSource conf) { * @return port number. 
*/ public static int getContainerPort(ConfigurationSource conf) { - return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + return conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); } public static Collection getOzoneDatanodeRatisDirectory( ConfigurationSource conf) { Collection rawLocations = conf.getTrimmedStringCollection( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (rawLocations.isEmpty()) { rawLocations = new ArrayList<>(1); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index b241ac0f2d28..f3a303cad738 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -186,7 +186,7 @@ public void testContainerPlacementCapacity() throws IOException, testDir.getAbsolutePath()); conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class); - conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 58f65df8fd85..c74e274d3d72 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -151,10 +151,10 @@ public void testGetVersionTask() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new OzoneContainer(dnDetails, ozoneConf, ContainerTestUtils.getMockContext(dnDetails, ozoneConf)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); @@ -179,9 +179,9 @@ public void testGetVersionTask() throws Exception { */ @Test public void testDeletedContainersClearedOnStartup() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, @@ -217,12 +217,12 @@ public void testDeletedContainersClearedOnStartup() throws Exception { @Test public void testCheckVersionResponse() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, + 
ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); ozoneConf.setFromObject(new ReplicationConfig().setPort(0)); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -267,7 +267,7 @@ public void testCheckVersionResponse() throws Exception { */ @Test public void testDnLayoutVersionFile() throws Exception { - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); try (EndpointStateMachine rpcEndPoint = createEndpoint(ozoneConf, serverAddress, 1000)) { @@ -579,7 +579,7 @@ private StateContext heartbeatTaskHelper( // Mini Ozone cluster will not come up if the port is not true, since // Ratis will exit if the server port cannot be bound. We can remove this // hard coding once we fix the Ratis default behaviour. - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 6a5550e9fbd3..499d58b1ff2a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -98,8 +98,8 @@ public ContainerOperationClient(OzoneConfiguration conf) throws IOException { containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); boolean useRatis = conf.getBoolean( - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, - ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, + ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_DEFAULT); if (useRatis) { replicationFactor = HddsProtos.ReplicationFactor.THREE; replicationType = HddsProtos.ReplicationType.RATIS; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 6469a631768c..f7f49fec3d1d 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -261,10 +261,10 @@ protected void initializeConfiguration() throws IOException { TimeUnit.SECONDS); conf.setInt( OzoneConfigKeys - .DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, + .HDDS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_PER_VOLUME_KEY, 4); conf.setInt( - OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, + OzoneConfigKeys.HDDS_CONTAINER_RATIS_NUM_CONTAINER_OP_EXECUTORS_KEY, 2); conf.setInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, 2); 
ReplicationManagerConfiguration replicationConf = @@ -273,8 +273,8 @@ protected void initializeConfiguration() throws IOException { replicationConf.setEventTimeout(Duration.ofSeconds(20)); replicationConf.setDatanodeTimeoutOffset(0); conf.setFromObject(replicationConf); - conf.setInt(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); + conf.setInt(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 100); + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys.OZONE_OM_RATIS_LOG_PURGE_GAP, 100); conf.setInt(OMConfigKeys. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 6ec6a32d4fba..059f7b3e03d3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; @@ -83,7 +83,7 @@ public static void init() throws Exception { final int blockSize = 2 * maxFlushSize; final BucketLayout layout = BucketLayout.FILE_SYSTEM_OPTIMIZED; - CONF.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); + CONF.setBoolean(HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true); CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java index 6f0bd40dde0e..2829ba234ca0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestRatisPipelineLeader.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.ozone.test.GenericTestUtils; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -132,7 +132,7 @@ public void testLeaderIdAfterLeaderChange() throws Exception { dnToStop.get().stop(); // wait long enough based on leader election min timeout Thread.sleep(4000 * conf.getTimeDuration( - DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 5, TimeUnit.SECONDS)); 
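For the TestRatisPipelineLeader change above: the test sleeps 4000 times the configured leader-election minimum timeout (read in seconds), which is 20 seconds with the 5-second default defined earlier in this patch. A plain-Java sketch of that wait computation; the class name is illustrative.

// Plain-Java sketch of the wait used in TestRatisPipelineLeader:
// 4000 ms per configured second of leader-election minimum timeout.
public class LeaderElectionWaitSketch {
  public static void main(String[] args) {
    long minTimeoutSeconds = 5L; // hdds.ratis.leader.election.minimum.timeout.duration default
    long waitMillis = 4000L * minTimeoutSeconds; // 20000 ms with the default
    System.out.println("waiting " + waitMillis + " ms for a new pipeline leader");
  }
}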
GenericTestUtils.waitFor(() -> { try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index 5338cb8a0cc1..c084a72a3c79 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -44,11 +44,11 @@ public interface RatisTestHelper { Logger LOG = LoggerFactory.getLogger(RatisTestHelper.class); static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); - LOG.info("{} = {}", OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, + LOG.info("{} = {}", OzoneConfigKeys.HDDS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 020f8623c4ef..275061ef7843 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -48,7 +48,7 @@ import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -68,7 +68,7 @@ static void setup(@TempDir File testDir) { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); - conf.setBoolean(DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); + conf.setBoolean(HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); } @@ -114,13 +114,13 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { // Each instance of SM will create an ozone container // that bounds to a random port. 
- ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true); - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); ozoneConf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); List stateMachines = new ArrayList<>(); try { @@ -168,7 +168,7 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { } // Turn off the random port flag and test again - ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + ozoneConf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); try ( DatanodeStateMachine sm1 = new DatanodeStateMachine( randomDatanodeDetails(), ozoneConf); @@ -182,8 +182,8 @@ void testContainerRandomPort(@TempDir File tempDir) throws IOException { assertFalse(ports.add(sm2.getContainer().getReadChannel().getIPCPort())); assertFalse(ports.add(sm3.getContainer().getReadChannel().getIPCPort())); assertEquals(ports.iterator().next().intValue(), - conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT)); + conf.getInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java index 6cc6bcb8e95d..8f79605ab051 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -39,12 +39,12 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_SERVER_PORT; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.anyHostWithFreePort; import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; @@ -96,7 +96,7 @@ 
public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException { Path ratisDir = baseDir.resolve("ratis"); Files.createDirectories(ratisDir); - dnConf.set(DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); + dnConf.set(HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, ratisDir.toString()); if (layoutVersion != null) { DatanodeLayoutStorage layoutStorage = new DatanodeLayoutStorage( @@ -111,11 +111,11 @@ private void configureDatanodePorts(ConfigurationTarget conf) { conf.set(HDDS_REST_HTTP_ADDRESS_KEY, anyHostWithFreePort()); conf.set(HDDS_DATANODE_HTTP_ADDRESS_KEY, anyHostWithFreePort()); conf.set(HDDS_DATANODE_CLIENT_ADDRESS_KEY, anyHostWithFreePort()); - conf.setInt(DFS_CONTAINER_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_IPC_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); - conf.setInt(DFS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_IPC_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_ADMIN_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_SERVER_PORT, getFreePort()); + conf.setInt(HDDS_CONTAINER_RATIS_DATASTREAM_PORT, getFreePort()); conf.setFromObject(new ReplicationServer.ReplicationConfig().setPort(getFreePort())); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 563904922e9b..3f1c31edfe70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -103,7 +103,7 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, "5s"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 4588a86a48c7..b6eaca8e80d0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -168,7 +168,7 @@ public static void init() throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(20)); conf.setFromObject(raftClientConfig); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(10) @@ -309,9 +309,9 @@ public void testContainerStateMachineFailures() throws Exception { // restart the hdds datanode, container should not in the regular set OzoneConfiguration 
config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); cluster.restartHddsDatanode(dn.getDatanodeDetails(), false); ozoneContainer = cluster.getHddsDatanodes().get(index) @@ -373,9 +373,9 @@ public void testUnhealthyContainer() throws Exception { OzoneConfiguration config = dn.getConf(); final String dir = config.get(OzoneConfigKeys. - DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR) + UUID.randomUUID(); - config.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + config.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); int index = cluster.getHddsDatanodeIndex(dn.getDatanodeDetails()); // restart the hdds datanode and see if the container is listed in the // in the missing container set and not in the regular set diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index bf41df6c7878..229059d84ad1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -106,7 +106,7 @@ public void setup() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); OzoneManager.setTestSecureOmFlag(true); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString()); conf.set(OzoneConfigKeys.OZONE_SCM_CLOSE_CONTAINER_WAIT_DURATION, "2s"); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_SCRUB_INTERVAL, "2s"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java index be27dab58ed0..d48df574a94e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java @@ -123,7 +123,7 @@ public void setup() throws Exception { .setStreamBufferMaxSize(MAX_FLUSH_SIZE) .applyTo(conf); - conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); + conf.setLong(OzoneConfigKeys.HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index 4e0508792959..5c0910ecdc2d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -111,7 +111,7 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index a385edd0275c..b4ad49a3ed5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -107,7 +107,7 @@ private void init() throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setBoolean( OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index ed00686bd8a5..34f85d8e9922 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -105,7 +105,7 @@ private void startCluster(int datanodes) throws Exception { conf.setFromObject(ratisClientConfig); conf.setTimeDuration( - OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + OzoneConfigKeys.HDDS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, 1, TimeUnit.SECONDS); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index 8c35d5011a5d..0fd31bb4b728 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -183,10 +183,10 @@ static void runContainerStateMachineMetrics( static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return 
XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index d4900bb48783..51943a2e8d23 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -78,7 +78,7 @@ public void testContainerMetrics() throws Exception { Pipeline pipeline = MockPipeline .createSingleNodePipeline(); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); conf.setInt(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 7a64ddc5d5e7..1b8bae0d03a8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -68,7 +68,7 @@ public void testCreateOzoneContainer( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); @@ -99,7 +99,7 @@ void testOzoneContainerStart( Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(OZONE_METADATA_DIRS, ozoneMetaDir.getPath()); conf.set(HDDS_DATANODE_DIR_KEY, hddsNodeDir.getPath()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index 715b0678a173..4f24f8e6c320 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -129,10 +129,10 @@ void testCreateOzoneContainer(boolean requireToken, boolean hasToken, try { Pipeline pipeline = MockPipeline.createSingleNodePipeline(); conf.set(HDDS_DATANODE_DIR_KEY, tempFolder.toString()); - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline .getFirstNode().getPort(DatanodeDetails.Port.Name.STANDALONE) .getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); 
container = new OzoneContainer(dn, conf, ContainerTestUtils diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 3c89bb12ee7a..c05f55bd4a74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -89,7 +89,7 @@ public class TestContainerServer { public static void setup() { DefaultMetricsSystem.setMiniClusterMode(true); CONF.set(HddsConfigKeys.HDDS_METADATA_DIR_NAME, TEST_DIR); - CONF.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); + CONF.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); caClient = new DNCertificateClient(new SecurityConfig(CONF), null, dn, null, null, null); @@ -104,7 +104,7 @@ public static void tearDown() throws Exception { public void testClientServer() throws Exception { DatanodeDetails datanodeDetails = randomDatanodeDetails(); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -121,10 +121,10 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, @@ -216,7 +216,7 @@ public void testClientServerWithContainerDispatcher() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 53420c0e2209..e0522ac6e91d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -158,7 +158,7 @@ public void testClientServer() throws Exception { HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf - .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, + .setInt(OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, pipeline.getFirstNode() 
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, @@ -201,14 +201,14 @@ public void testClientServerRatisGrpc() throws Exception { static XceiverServerRatis newXceiverServerRatis( DatanodeDetails dn, OzoneConfiguration conf) throws IOException { - conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT, + conf.setInt(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT, dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue()); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setBoolean( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); final String dir = TEST_DIR + dn.getUuid(); - conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); + conf.set(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir); final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); return XceiverServerRatis.newXceiverServerRatis(dn, conf, dispatcher, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index 8e95e6cb18a7..7c82633f1136 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -73,7 +73,7 @@ public void destroy() throws Exception { public void testRatisLogParsing() throws Exception { OzoneConfiguration conf = cluster.getHddsDatanodes().get(0).getConf(); String path = - conf.get(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + conf.get(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); UUID pid = cluster.getStorageContainerManager().getPipelineManager() .getPipelines().get(0).getId().getId(); File pipelineDir = new File(path, pid.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 0324d030afab..c1d55accfd70 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -51,7 +51,7 @@ public static void init() throws Exception { .build(); conf.setQuietMode(false); // enable ratis for Scm. 
- conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 1614f81087b1..3d426ed03498 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -190,8 +190,8 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, OzoneFSUtils.validateBucketLayout(bucket.getName(), resolvedBucketLayout); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); this.config = conf; } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 7a80878549bd..ce27fce8c1e3 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -198,8 +198,8 @@ public BasicRootedOzoneClientAdapterImpl(String omHost, int omPort, proxy = objectStore.getClientProxy(); this.configuredDnPort = conf.getInt( - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT, + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); // Fetches the bucket layout to be used by OFS. 
initDefaultFsBucketLayout(conf); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 1d3850b12ac9..47be6aeb6df3 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -106,8 +106,8 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_REPLICATION; @@ -196,8 +196,8 @@ public void init() { OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES); datastreamEnabled = ozoneConfiguration.getBoolean( - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, - DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, + HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); datastreamMinLength = (long) ozoneConfiguration.getStorageSize( OZONE_FS_DATASTREAM_AUTO_THRESHOLD, OZONE_FS_DATASTREAM_AUTO_THRESHOLD_DEFAULT, StorageUnit.BYTES); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index 775d5a197693..28ce32e74707 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -68,7 +68,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); REST.setOzoneConfiguration(conf); REST.init(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index f92496249e20..d988b4302308 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -81,7 +81,7 @@ public static void setUp() throws Exception { REST.setClient(client); OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, + conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); conf.setStorageSize(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, 1, StorageUnit.BYTES); 
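For reference, here is a minimal, illustrative Java sketch (not taken from any patch in this series) of how the renamed HDDS_* constants in OzoneConfigKeys are typically wired into a test configuration; the class name and the chosen values are assumptions made purely for illustration.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConfigKeys;

/** Illustrative only: exercises the post-rename HDDS_* keys. */
public final class HddsKeyRenameSketch {

  private HddsKeyRenameSketch() {
  }

  public static OzoneConfiguration newRandomPortConf() {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Random ports keep multiple datanode state machines from colliding
    // when they share a single test host.
    conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_IPC_RANDOM_PORT, true);
    conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
    conf.setBoolean(
        OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
    // The datastream write path is toggled through the renamed key as well.
    conf.setBoolean(
        OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true);
    return conf;
  }
}

The same pattern applies to the other renamed keys touched above (for example HDDS_RATIS_SNAPSHOT_THRESHOLD_KEY and HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); only the constant names change, while their value types and semantics are unaffected by the rename.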
From 1eeaa0baa2e8eacb8be4c305b787bd9bfe7b187f Mon Sep 17 00:00:00 2001 From: hao guo Date: Thu, 29 Feb 2024 02:09:58 +0800 Subject: [PATCH 080/108] HDDS-10144. Zero-Copy in replication (#6049) --- .../replication/GrpcReplicationService.java | 93 ++++++++- .../replication/ReplicationServer.java | 36 +++- .../SendContainerRequestHandler.java | 14 +- .../TestGrpcReplicationService.java | 178 +++++++++++++++++- ...estGrpcReplicationServiceWithZeroCopy.java | 31 +++ .../TestSendContainerRequestHandler.java | 2 +- 6 files changed, 342 insertions(+), 12 deletions(-) create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java index 20c36b4d1fcf..6bc237207b37 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationService.java @@ -19,7 +19,10 @@ package org.apache.hadoop.ozone.container.replication; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; +import java.util.HashSet; +import java.util.Set; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; @@ -28,11 +31,18 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc; import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; +import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; +import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; +import org.apache.ratis.thirdparty.io.grpc.ServerCallHandler; +import org.apache.ratis.thirdparty.io.grpc.ServerServiceDefinition; import org.apache.ratis.thirdparty.io.grpc.stub.CallStreamObserver; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.getDownloadMethod; +import static org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.getUploadMethod; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.fromProto; /** @@ -49,10 +59,79 @@ public class GrpcReplicationService extends private final ContainerReplicationSource source; private final ContainerImporter importer; + private final boolean zeroCopyEnabled; + + private final ZeroCopyMessageMarshaller + sendContainerZeroCopyMessageMarshaller; + + private final ZeroCopyMessageMarshaller + copyContainerZeroCopyMessageMarshaller; + public GrpcReplicationService(ContainerReplicationSource source, - ContainerImporter importer) { + ContainerImporter importer, boolean zeroCopyEnabled) { this.source = source; this.importer = importer; + this.zeroCopyEnabled = zeroCopyEnabled; + + if (zeroCopyEnabled) { + sendContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + SendContainerRequest.getDefaultInstance()); + copyContainerZeroCopyMessageMarshaller = new ZeroCopyMessageMarshaller<>( + CopyContainerRequestProto.getDefaultInstance()); + } else 
{ + sendContainerZeroCopyMessageMarshaller = null; + copyContainerZeroCopyMessageMarshaller = null; + } + } + + public ServerServiceDefinition bindServiceWithZeroCopy() { + ServerServiceDefinition orig = super.bindService(); + if (!zeroCopyEnabled) { + LOG.info("Zerocopy is not enabled."); + return orig; + } + + Set methodNames = new HashSet<>(); + ServerServiceDefinition.Builder builder = + ServerServiceDefinition.builder(orig.getServiceDescriptor().getName()); + + // Add `upload` method with zerocopy marshaller. + MethodDescriptor uploadMethod = + getUploadMethod(); + addZeroCopyMethod(orig, builder, uploadMethod, + sendContainerZeroCopyMessageMarshaller); + methodNames.add(uploadMethod.getFullMethodName()); + + // Add `download` method with zerocopy marshaller. + MethodDescriptor + downloadMethod = getDownloadMethod(); + addZeroCopyMethod(orig, builder, downloadMethod, + copyContainerZeroCopyMessageMarshaller); + methodNames.add(downloadMethod.getFullMethodName()); + + // Add other methods as is. + orig.getMethods().stream().filter( + x -> !methodNames.contains(x.getMethodDescriptor().getFullMethodName()) + ).forEach( + builder::addMethod + ); + + return builder.build(); + } + + private static void addZeroCopyMethod( + ServerServiceDefinition orig, + ServerServiceDefinition.Builder newServiceBuilder, + MethodDescriptor origMethod, + ZeroCopyMessageMarshaller zeroCopyMarshaller) { + MethodDescriptor newMethod = origMethod.toBuilder() + .setRequestMarshaller(zeroCopyMarshaller) + .build(); + @SuppressWarnings("unchecked") + ServerCallHandler serverCallHandler = + (ServerCallHandler) orig.getMethod( + newMethod.getFullMethodName()).getServerCallHandler(); + newServiceBuilder.addMethod(newMethod, serverCallHandler); } @Override @@ -76,13 +155,21 @@ public void download(CopyContainerRequestProto request, } finally { // output may have already been closed, ignore such errors IOUtils.cleanupWithLogger(LOG, outputStream); + + if (copyContainerZeroCopyMessageMarshaller != null) { + InputStream popStream = + copyContainerZeroCopyMessageMarshaller.popStream(request); + if (popStream != null) { + IOUtils.cleanupWithLogger(LOG, popStream); + } + } } } @Override public StreamObserver upload( StreamObserver responseObserver) { - - return new SendContainerRequestHandler(importer, responseObserver); + return new SendContainerRequestHandler(importer, responseObserver, + sendContainerZeroCopyMessageMarshaller); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index d2407a61d0b5..f72ca2a6881d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -99,16 +99,18 @@ public ReplicationServer(ContainerController controller, new LinkedBlockingQueue<>(replicationQueueLimit), threadFactory); - init(); + init(replicationConfig.isZeroCopyEnable()); } - public void init() { + public void init(boolean enableZeroCopy) { + GrpcReplicationService grpcReplicationService = new GrpcReplicationService( + new OnDemandContainerReplicationSource(controller), importer, + enableZeroCopy); NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) - 
.addService(ServerInterceptors.intercept(new GrpcReplicationService( - new OnDemandContainerReplicationSource(controller), - importer - ), new GrpcServerInterceptor())) + .addService(ServerInterceptors.intercept( + grpcReplicationService.bindServiceWithZeroCopy(), + new GrpcServerInterceptor())) .executor(executor); if (secConf.isSecurityEnabled() && secConf.isGrpcTlsEnabled()) { @@ -203,6 +205,11 @@ public static final class ReplicationConfig { static final String REPLICATION_OUTOFSERVICE_FACTOR_KEY = PREFIX + "." + OUTOFSERVICE_FACTOR_KEY; + public static final String ZEROCOPY_ENABLE_KEY = "zerocopy.enabled"; + private static final boolean ZEROCOPY_ENABLE_DEFAULT = true; + private static final String ZEROCOPY_ENABLE_DEFAULT_VALUE = + "true"; + /** * The maximum number of replication commands a single datanode can execute * simultaneously. @@ -244,6 +251,15 @@ public static final class ReplicationConfig { ) private double outOfServiceFactor = OUTOFSERVICE_FACTOR_DEFAULT; + @Config(key = ZEROCOPY_ENABLE_KEY, + type = ConfigType.BOOLEAN, + defaultValue = ZEROCOPY_ENABLE_DEFAULT_VALUE, + tags = {DATANODE, SCM}, + description = "Specify if zero-copy should be enabled for " + + "replication protocol." + ) + private boolean zeroCopyEnable = ZEROCOPY_ENABLE_DEFAULT; + public double getOutOfServiceFactor() { return outOfServiceFactor; } @@ -277,6 +293,14 @@ public void setReplicationQueueLimit(int limit) { this.replicationQueueLimit = limit; } + public boolean isZeroCopyEnable() { + return zeroCopyEnable; + } + + public void setZeroCopyEnable(boolean zeroCopyEnable) { + this.zeroCopyEnable = zeroCopyEnable; + } + @PostConstruct public void validate() { if (replicationMaxStreams < 1) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java index 6bcd46ba0a7a..506a96fe0514 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SendContainerRequestHandler.java @@ -24,11 +24,13 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.nio.file.Files; import java.nio.file.Path; @@ -53,12 +55,15 @@ class SendContainerRequestHandler private HddsVolume volume; private Path path; private CopyContainerCompression compression; + private final ZeroCopyMessageMarshaller marshaller; SendContainerRequestHandler( ContainerImporter importer, - StreamObserver responseObserver) { + StreamObserver responseObserver, + ZeroCopyMessageMarshaller marshaller) { this.importer = importer; this.responseObserver = responseObserver; + this.marshaller = marshaller; } @Override @@ -98,6 +103,13 @@ public void onNext(SendContainerRequest req) { nextOffset += length; } catch (Throwable t) { onError(t); + } finally { + if (marshaller != null) { + InputStream popStream = marshaller.popStream(req); + if (popStream != null) { + 
IOUtils.cleanupWithLogger(LOG, popStream); + } + } } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index f479ff93372d..b3b26de9c278 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -17,14 +17,50 @@ */ package org.apache.hadoop.ozone.container.replication; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.CopyContainerResponseProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.volume.HddsVolume; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.ratis.thirdparty.io.grpc.stub.CallStreamObserver; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import java.io.File; import java.io.IOException; import java.io.OutputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; +import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.toTarget; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -34,6 +70,146 @@ */ class TestGrpcReplicationService { + @TempDir + private Path tempDir; + + private ReplicationServer replicationServer; + private OzoneConfiguration conf; + private ContainerController containerController; + private DatanodeDetails datanode; + private static final long CONTAINER_ID = 123456L; + private final AtomicLong 
pushContainerId = new AtomicLong(); + + @BeforeEach + public void setUp() throws Exception { + init(false); + } + + public void init(boolean isZeroCopy) throws Exception { + conf = new OzoneConfiguration(); + + ReplicationServer.ReplicationConfig replicationConfig = + conf.getObject(ReplicationServer.ReplicationConfig.class); + + replicationConfig.setZeroCopyEnable(isZeroCopy); + + SecurityConfig secConf = new SecurityConfig(conf); + + ContainerSet containerSet = new ContainerSet(1000); + + DatanodeDetails.Builder dn = + DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) + .setHostName("localhost").setIpAddress("127.0.0.1") + .setPersistedOpState(HddsProtos.NodeOperationalState.IN_SERVICE) + .setPersistedOpStateExpiry(0); + DatanodeDetails.Port containerPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, + OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + DatanodeDetails.Port ratisPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, + OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + DatanodeDetails.Port replicationPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.REPLICATION, + replicationConfig.getPort()); + DatanodeDetails.Port streamPort = + DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM, + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + dn.addPort(containerPort); + dn.addPort(ratisPort); + dn.addPort(replicationPort); + dn.addPort(streamPort); + + datanode = dn.build(); + + final String testDir = + Files.createDirectory(tempDir.resolve("VolumeDir")).toString(); + + MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); + when(volumeSet.getVolumesList()).thenReturn(Collections.singletonList( + new HddsVolume.Builder(testDir).conf(conf).build())); + + ContainerMetrics metrics = ContainerMetrics.create(conf); + Handler containerHandler = + new KeyValueHandler(conf, datanode.getUuidString(), containerSet, + volumeSet, metrics, c -> { + }); + + containerController = new ContainerController(containerSet, + Collections.singletonMap( + ContainerProtos.ContainerType.KeyValueContainer, containerHandler)); + + KeyValueContainerData data = new KeyValueContainerData( + CONTAINER_ID, + ContainerLayoutVersion.FILE_PER_BLOCK, GB, UUID.randomUUID().toString(), + datanode.getUuidString()); + KeyValueContainer container = new KeyValueContainer(data, conf); + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), + "test-replication"); + containerSet.addContainer(container); + container.close(); + + ContainerImporter importer = mock(ContainerImporter.class); + doAnswer(invocation -> { + pushContainerId.set((long) invocation.getArguments()[0]); + return null; + }).when(importer).importContainer(anyLong(), any(), any(), any()); + doReturn(true).when(importer).isAllowedContainerImport(eq( + CONTAINER_ID)); + when(importer.chooseNextVolume()).thenReturn(new HddsVolume.Builder( + Files.createDirectory(tempDir.resolve("ImporterDir")).toString()).conf( + conf).build()); + + replicationServer = + new ReplicationServer(containerController, replicationConfig, secConf, + null, importer, datanode.threadNamePrefix()); + replicationServer.start(); + } + + @AfterEach + public void cleanup() { + replicationServer.stop(); + } + + @Test + public void testDownload() throws IOException { + SimpleContainerDownloader downloader = + new SimpleContainerDownloader(conf, null); + Path downloadDir = Files.createDirectory(tempDir.resolve("DownloadDir")); + Path result = downloader.getContainerDataFromReplicas( + CONTAINER_ID, + 
Collections.singletonList(datanode), downloadDir, + CopyContainerCompression.NO_COMPRESSION); + + assertTrue(result.toString().startsWith(downloadDir.toString())); + + File[] files = downloadDir.toFile().listFiles(); + + assertNotNull(files); + assertEquals(files.length, 1); + + assertTrue(files[0].getName().startsWith("container-" + + CONTAINER_ID + "-")); + + downloader.close(); + } + + @Test + public void testUpload() { + ContainerReplicationSource source = + new OnDemandContainerReplicationSource(containerController); + + GrpcContainerUploader uploader = new GrpcContainerUploader(conf, null); + + PushReplicator pushReplicator = new PushReplicator(conf, source, uploader); + + ReplicationTask task = + new ReplicationTask(toTarget(CONTAINER_ID, datanode), pushReplicator); + + pushReplicator.replicate(task); + + assertEquals(pushContainerId.get(), CONTAINER_ID); + } + @Test void closesStreamOnError() { // GIVEN @@ -51,7 +227,7 @@ public void copyData(long containerId, OutputStream destination, }; ContainerImporter importer = mock(ContainerImporter.class); GrpcReplicationService subject = - new GrpcReplicationService(source, importer); + new GrpcReplicationService(source, importer, false); CopyContainerRequestProto request = CopyContainerRequestProto.newBuilder() .setContainerID(1) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java new file mode 100644 index 000000000000..00891cf3e24d --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationServiceWithZeroCopy.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import org.junit.jupiter.api.BeforeEach; + +/** + * Tests {@link GrpcReplicationService}. + */ +class TestGrpcReplicationServiceWithZeroCopy + extends TestGrpcReplicationService { + @BeforeEach + public void setUp() throws Exception { + init(true); + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index f054358b35b4..baaf296f02ba 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -75,7 +75,7 @@ void testReceiveDataForExistingContainer() throws Exception { return null; }).when(observer).onError(any()); SendContainerRequestHandler sendContainerRequestHandler - = new SendContainerRequestHandler(containerImporter, observer); + = new SendContainerRequestHandler(containerImporter, observer, null); ByteString data = ByteString.copyFromUtf8("test"); ContainerProtos.SendContainerRequest request = ContainerProtos.SendContainerRequest.newBuilder() From c3271b8bf46ede1fb81b15120b0bc2885a7acf18 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Wed, 28 Feb 2024 19:25:19 +0100 Subject: [PATCH 081/108] HDDS-10144. 
(addendum) Zero-Copy in replication --- .../container/replication/TestGrpcReplicationService.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index b3b26de9c278..bad3e7ee81db 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -104,16 +104,16 @@ public void init(boolean isZeroCopy) throws Exception { .setPersistedOpStateExpiry(0); DatanodeDetails.Port containerPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.STANDALONE, - OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT_DEFAULT); DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS, - OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_IPC_PORT_DEFAULT); DatanodeDetails.Port replicationPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.REPLICATION, replicationConfig.getPort()); DatanodeDetails.Port streamPort = DatanodeDetails.newPort(DatanodeDetails.Port.Name.RATIS_DATASTREAM, - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); + OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT); dn.addPort(containerPort); dn.addPort(ratisPort); dn.addPort(replicationPort); From 083e9140a17d1572a9b42c0e6740f15e2317e187 Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Wed, 28 Feb 2024 14:15:37 -0800 Subject: [PATCH 082/108] HDDS-10433. Add Prometheus scrape target for Datanodes in Compose v2 (#6288) --- .../src/main/compose/ozone/prometheus.yml | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml b/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml index 4e512201d2b9..a88c30d57f4b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml +++ b/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml @@ -14,8 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. global: - scrape_interval: 15s # By default, scrape targets every 15 seconds. - + scrape_interval: 15s # By default, scrape targets every 15 seconds. scrape_configs: - job_name: ozone metrics_path: /prom @@ -32,7 +31,9 @@ scrape_configs: - "s3g:9878" labels: component: s3g - - targets: + - targets: # During compose bring up the number of datanodes can be specific, adding 10 nodes to account for that. + # Duplicate datanodes are specified here to account for compose v1 vs v2 differences. 
+ # compose v1 - "ozone_datanode_1:9882" - "ozone_datanode_2:9882" - "ozone_datanode_3:9882" @@ -43,5 +44,20 @@ scrape_configs: - "ozone_datanode_8:9882" - "ozone_datanode_9:9882" - "ozone_datanode_10:9882" + # compose v2 + - "ozone-datanode-1:9882" + - "ozone-datanode-2:9882" + - "ozone-datanode-3:9882" + - "ozone-datanode-4:9882" + - "ozone-datanode-5:9882" + - "ozone-datanode-6:9882" + - "ozone-datanode-7:9882" + - "ozone-datanode-8:9882" + - "ozone-datanode-9:9882" + - "ozone-datanode-10:9882" + labels: + component: datanode + - targets: + - "recon:9888" labels: - component: datanode \ No newline at end of file + component: recon \ No newline at end of file From 804366a0f0a2ac28365812c5d325f5a1df108bf7 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 29 Feb 2024 11:14:21 +0100 Subject: [PATCH 083/108] HDDS-6713. Avoid the need to cast to MiniOzoneHAClusterImpl (#6295) --- .../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java | 13 ++++++------- .../hadoop/hdds/scm/TestFailoverWithSCMHA.java | 2 +- .../hdds/scm/TestSCMInstallSnapshotWithHA.java | 2 +- .../hadoop/hdds/scm/TestSecretKeySnapshot.java | 5 +++-- .../apache/hadoop/hdds/scm/TestSecretKeysApi.java | 4 ++-- .../hdds/scm/TestStorageContainerManagerHA.java | 2 +- .../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 5 ++--- .../hadoop/hdds/upgrade/TestScmHAFinalization.java | 7 +++---- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 2 +- .../org/apache/hadoop/ozone/TestBlockTokens.java | 4 ++-- .../apache/hadoop/ozone/TestBlockTokensCLI.java | 4 ++-- .../hadoop/ozone/TestMultipartObjectGet.java | 4 ++-- .../metrics/TestDatanodeQueueMetrics.java | 8 ++++---- .../hadoop/ozone/om/TestAddRemoveOzoneManager.java | 2 +- .../hadoop/ozone/om/TestOMBucketLayoutUpgrade.java | 8 ++++---- .../hadoop/ozone/om/TestOMRatisSnapshots.java | 2 +- .../hadoop/ozone/om/TestOMUpgradeFinalization.java | 8 ++++---- .../hadoop/ozone/om/TestOmSnapshotDisabled.java | 5 ++--- .../ozone/om/TestOmSnapshotDisabledRestart.java | 2 +- .../apache/hadoop/ozone/om/TestOzoneManagerHA.java | 9 ++------- .../ozone/om/TestOzoneManagerHASnapshot.java | 2 +- .../ozone/om/TestSnapshotBackgroundServices.java | 2 +- .../snapshot/TestOzoneManagerSnapshotProvider.java | 2 +- .../om/snapshot/TestOzoneSnapshotRestore.java | 4 ++-- .../ozone/parser/TestOzoneHARatisLogParser.java | 2 +- .../ozone/recon/TestReconWithOzoneManagerHA.java | 8 ++++---- .../ozone/shell/TestDeletedBlocksTxnShell.java | 2 +- .../hadoop/ozone/shell/TestOzoneShellHA.java | 13 ++++++------- .../hadoop/ozone/shell/TestOzoneTenantShell.java | 14 ++++++-------- .../ozone/shell/TestTransferLeadershipShell.java | 2 +- 30 files changed, 69 insertions(+), 80 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index 6f3a9bb5a173..4f14ede8fa52 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -72,7 +72,7 @@ public class TestOzoneFsHAURLs { TestOzoneFsHAURLs.class); private OzoneConfiguration conf; - private static MiniOzoneCluster cluster; + private static MiniOzoneHAClusterImpl cluster; private static String omServiceId; private static OzoneManager om; private static int numOfOMs; @@ -107,11 +107,11 @@ static void initClass(@TempDir File tempDir) throws 
Exception { conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); // Start the cluster - cluster = MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(5) - .build(); + .setNumDatanodes(5); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); @@ -160,8 +160,7 @@ public static void shutdown() { * @return the leader OM's RPC address in the MiniOzoneHACluster */ private String getLeaderOMNodeAddr() { - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - OzoneManager omLeader = haCluster.getOMLeader(); + OzoneManager omLeader = cluster.getOMLeader(); assertNotNull(omLeader, "There should be a leader OM at this point."); String omNodeId = omLeader.getOMNodeId(); // omLeaderAddrKey=ozone.om.address.omServiceId.omNodeId diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java index 688d13ad361b..9db501edb721 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestFailoverWithSCMHA.java @@ -84,7 +84,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs).setNumOfActiveSCMs(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java index 0aa2599637a9..10492736144b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshotWithHA.java @@ -94,7 +94,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(numOfOMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java index ebd68eb13edb..4cfc64cd4f50 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeySnapshot.java @@ -118,7 +118,8 @@ public void init() throws Exception { conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, ROTATE_DURATION_MS + "ms"); conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_MS + "ms"); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + 
builder .setSCMServiceId("TestSecretKeySnapshot") .setSCMServiceId("SCMServiceId") .setNumOfStorageContainerManagers(3) @@ -126,7 +127,7 @@ public void init() throws Exception { .setNumOfOzoneManagers(1) .setNumDatanodes(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java index 405534af95eb..6af43c3bacde 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSecretKeysApi.java @@ -326,12 +326,12 @@ public void testSecretKeyWithoutAuthorization() throws Exception { private void startCluster(int numSCMs) throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") .setNumOfStorageContainerManagers(numSCMs) .setNumOfOzoneManagers(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java index e62820cfb1d0..2986484d2ad0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHA.java @@ -95,7 +95,7 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, "1"); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index 33c09e2ce835..62e3b6321eeb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -166,9 +166,8 @@ public static void initClass() { SCMConfigurator scmConfigurator = new SCMConfigurator(); scmConfigurator.setUpgradeFinalizationExecutor(scmFinalizationExecutor); - MiniOzoneCluster.Builder builder = - new MiniOzoneHAClusterImpl.Builder(conf) - .setNumOfStorageContainerManagers(NUM_SCMS) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setNumOfStorageContainerManagers(NUM_SCMS) .setSCMConfigurator(scmConfigurator) .setNumDatanodes(NUM_DATA_NODES) .setDatanodeFactory(UniformDatanodesFactory.newBuilder() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index 
da63a7de6b2c..d5802aab6e02 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -91,9 +91,8 @@ public void init(OzoneConfiguration conf, conf.setInt(HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - MiniOzoneCluster.Builder clusterBuilder = - new MiniOzoneHAClusterImpl.Builder(conf) - .setNumOfStorageContainerManagers(NUM_SCMS) + MiniOzoneHAClusterImpl.Builder clusterBuilder = MiniOzoneCluster.newHABuilder(conf); + clusterBuilder.setNumOfStorageContainerManagers(NUM_SCMS) .setNumOfActiveSCMs(NUM_SCMS - numInactiveSCMs) .setSCMServiceId("scmservice") .setNumOfOzoneManagers(1) @@ -102,7 +101,7 @@ public void init(OzoneConfiguration conf, .setDatanodeFactory(UniformDatanodesFactory.newBuilder() .setLayoutVersion(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()) .build()); - this.cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); + this.cluster = clusterBuilder.build(); scmClient = cluster.getStorageContainerLocationClient(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index c33307c6906b..8746e77c9e15 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -392,7 +392,7 @@ public Builder setSCMServiceId(String serviceId) { } @Override - public MiniOzoneCluster build() throws IOException { + public MiniOzoneHAClusterImpl build() throws IOException { if (numOfActiveOMs > numOfOMs) { throw new IllegalArgumentException("Number of active OMs cannot be " + "more than the total number of OMs"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java index 9d05b54be8af..0b5dab36d3f5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -378,12 +378,12 @@ private static void setSecureConfig() throws IOException { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId("TestSecretKey") .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(1); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index a42fbf2e8642..87242cb2790e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -318,13 +318,13 @@ private String[] createArgsForCommand(String[] additionalArgs) { private static void startCluster() 
throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 0d0acfbd8c94..daeb3a7b2d74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -103,12 +103,12 @@ public static void init() throws Exception { private static void startCluster() throws IOException, TimeoutException, InterruptedException { OzoneManager.setTestSecureOmFlag(true); - MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setOMServiceId(omServiceId) .setNumOfStorageContainerManagers(3) .setNumOfOzoneManagers(3); - cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index a1d436b3360a..2f18326f7b1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -67,13 +67,13 @@ public void init() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); omServiceId = "om-service-test1"; scmServiceId = "scm-service-test1"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(1) - .build(); + .setNumDatanodes(1); + cluster = builder.build(); cluster.waitForClusterToBeReady(); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index 9d4d489586b0..ff57a1e7bbe9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -95,7 +95,7 @@ public class TestAddRemoveOzoneManager { private void setupCluster(int numInitialOMs) throws Exception { conf = new OzoneConfiguration(); conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 5); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = 
MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(SCM_DUMMY_SERVICE_ID) .setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(numInitialOMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java index 4b1fa817346d..ae97b3f7b907 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMBucketLayoutUpgrade.java @@ -94,11 +94,11 @@ void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, fromLayoutVersion); String omServiceId = UUID.randomUUID().toString(); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(3) - .setNumDatanodes(1) - .build(); + .setNumDatanodes(1); + cluster = builder.build(); cluster.waitForClusterToBeReady(); ozoneManager = cluster.getOzoneManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index a42b9ce4693b..42708b0b1607 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -155,7 +155,7 @@ public void init(TestInfo testInfo) throws Exception { omRatisConf.setLogAppenderWaitTimeMin(10); conf.setFromObject(omRatisConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index 716348e65a6f..ccf94bef3c80 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -105,11 +105,11 @@ void testOMUpgradeFinalizationWithOneOMDown() throws Exception { private static MiniOzoneHAClusterImpl newCluster(OzoneConfiguration conf) throws IOException { conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, INITIAL_VERSION.layoutVersion()); - return (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(UUID.randomUUID().toString()) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(UUID.randomUUID().toString()) .setNumOfOzoneManagers(3) - .setNumDatanodes(1) - .build(); + .setNumDatanodes(1); + return builder.build(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java index 37ec1a32471d..df9a4ddfe227 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java @@ -43,7 +43,7 @@ */ public class TestOmSnapshotDisabled { - private static MiniOzoneCluster cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static OzoneClient client; private static ObjectStore store; @@ -64,8 +64,7 @@ public static void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = - ((MiniOzoneHAClusterImpl) cluster).getOMLeader(); + OzoneManager leaderOzoneManager = cluster.getOMLeader(); OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); cluster.setConf(leaderConfig); store = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java index 4b84f5c925ed..8a5ae0234910 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java @@ -54,7 +54,7 @@ public static void init() throws Exception { // Enable filesystem snapshot feature at the beginning conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test2") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 091cd6fb1e06..2c6ab49b210d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -76,7 +76,6 @@ public abstract class TestOzoneManagerHA { private static MiniOzoneHAClusterImpl cluster = null; - private static MiniOzoneCluster.Builder clusterBuilder = null; private static ObjectStore objectStore; private static OzoneConfiguration conf; private static String omServiceId; @@ -106,10 +105,6 @@ public OzoneConfiguration getConf() { return conf; } - public MiniOzoneCluster.Builder getClusterBuilder() { - return clusterBuilder; - } - public String getOmServiceId() { return omServiceId; } @@ -177,11 +172,11 @@ public static void init() throws Exception { conf.set(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, "10s"); conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2"); - clusterBuilder = MiniOzoneCluster.newHABuilder(conf) + MiniOzoneHAClusterImpl.Builder clusterBuilder = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs); - cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); + cluster = clusterBuilder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(omServiceId, conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java index 11f655ae5f86..14826a18616f 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHASnapshot.java @@ -70,7 +70,7 @@ public static void staticInit() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test") .setNumOfOzoneManagers(3) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java index c3a9c075d11c..54ee0ed53796 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotBackgroundServices.java @@ -151,7 +151,7 @@ public void init(TestInfo testInfo) throws Exception { OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, SNAPSHOT_THRESHOLD); int numOfOMs = 3; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId("om-service-test1") .setNumOfOzoneManagers(numOfOMs) .setNumOfActiveOMs(2) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index c91cc29a8dcb..d28f25a28fac 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -66,7 +66,7 @@ public void init() throws Exception { omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index 226e707f33f5..a88290bfb8d8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -72,7 +72,7 @@ @Timeout(value = 300) public class TestOzoneSnapshotRestore { private static final String OM_SERVICE_ID = "om-service-test-1"; - private MiniOzoneCluster cluster; + private MiniOzoneHAClusterImpl cluster; private ObjectStore store; private OzoneManager leaderOzoneManager; private OzoneConfiguration clientConf; @@ -111,7 +111,7 @@ public void init() throws Exception { .build(); cluster.waitForClusterToBeReady(); - leaderOzoneManager = ((MiniOzoneHAClusterImpl) cluster).getOMLeader(); + leaderOzoneManager = cluster.getOMLeader(); OzoneConfiguration leaderConfig = 
leaderOzoneManager.getConfiguration(); cluster.setConf(leaderConfig); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java index 8f11941fcbf6..dff4cd046c9b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/parser/TestOzoneHARatisLogParser.java @@ -66,7 +66,7 @@ void setup() throws Exception { String omServiceId = "omServiceId1"; OzoneConfiguration conf = new OzoneConfiguration(); String scmServiceId = "scmServiceId"; - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId) .setNumOfOzoneManagers(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 4ba546c47dad..0d7cb5fbf075 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -73,12 +73,12 @@ public void setup() throws Exception { dbConf.setSyncOption(true); conf.setFromObject(dbConf); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(OM_SERVICE_ID) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(OM_SERVICE_ID) .setNumOfOzoneManagers(3) .setNumDatanodes(1) - .includeRecon(true) - .build(); + .includeRecon(true); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf); objectStore = client.getObjectStore(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 8bb6313cf73f..6f6c5439d8c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -96,7 +96,7 @@ public void init() throws Exception { conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setSCMServiceId(scmServiceId) .setNumOfStorageContainerManagers(numOfSCMs) .setNumOfActiveSCMs(numOfSCMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 00450985f10c..ba36fa1d97c5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -128,7 +128,7 @@ public class TestOzoneShellHA { private static File baseDir; private static File testFile; private static String testFilePathString; - private 
static MiniOzoneCluster cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static File testDir; private static MiniKMS miniKMS; private static OzoneClient client; @@ -184,11 +184,11 @@ protected static void startCluster(OzoneConfiguration conf) throws Exception { conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI(miniKMS)); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - cluster = MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(numDNs) - .build(); + .setNumDatanodes(numDNs); + cluster = builder.build(); cluster.waitForClusterToBeReady(); client = cluster.newClient(); } @@ -285,8 +285,7 @@ private void executeWithError(OzoneShell shell, String[] args, * @return the leader OM's Node ID in the MiniOzoneHACluster. */ private String getLeaderOMNodeId() { - MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; - OzoneManager omLeader = haCluster.getOMLeader(); + OzoneManager omLeader = cluster.getOMLeader(); assertNotNull(omLeader, "There should be a leader OM at this point."); return omLeader.getOMNodeId(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 548a8832be83..703848130f90 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -96,8 +96,7 @@ public class TestOzoneTenantShell { private static final File AUDIT_LOG_FILE = new File("audit.log"); private static OzoneConfiguration conf = null; - private static MiniOzoneCluster cluster = null; - private static MiniOzoneHAClusterImpl haCluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static OzoneShell ozoneSh = null; private static TenantShell tenantShell = null; @@ -153,12 +152,11 @@ public static void init() throws Exception { // Init cluster omServiceId = "om-service-test1"; numOfOMs = 3; - cluster = MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) + MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); + builder.setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) - .withoutDatanodes() // Remove this once we are actually writing data - .build(); - haCluster = (MiniOzoneHAClusterImpl) cluster; + .withoutDatanodes(); // Remove this once we are actually writing data + cluster = builder.build(); cluster.waitForClusterToBeReady(); } @@ -641,7 +639,7 @@ public void testOzoneTenantBasicOperations() throws IOException { // Because InMemoryMultiTenantAccessController is used in OMs for this // integration test, we need to trigger BG sync on all OMs just // in case a leader changed right after the last operation. 
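The change repeated across all of these integration tests is the same one: MiniOzoneHAClusterImpl.Builder.build() now declares the covariant return type MiniOzoneHAClusterImpl, so callers keep the HA-specific API (getOMLeader(), getOzoneManagersList(), and so on) without the old down-cast. A minimal sketch of the resulting pattern, with the service id and cluster sizes chosen purely for illustration:

    // Previously the HA builder was typed as the generic MiniOzoneCluster.Builder,
    // so tests had to write:
    //   cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf)...build();
    // With the covariant build() the cast disappears:
    MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf);
    builder.setOMServiceId("om-service-test1")   // illustrative service id
        .setNumOfOzoneManagers(3)
        .setNumDatanodes(1);
    MiniOzoneHAClusterImpl cluster = builder.build();
    cluster.waitForClusterToBeReady();
    OzoneManager leader = cluster.getOMLeader();  // HA-only API, no cast needed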
- haCluster.getOzoneManagersList().forEach(om -> om.getMultiTenantManager() + cluster.getOzoneManagersList().forEach(om -> om.getMultiTenantManager() .getOMRangerBGSyncService().triggerRangerSyncOnce()); // Delete dev volume should fail because the volume reference count > 0L diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java index 62d50708c83a..d3d7c7766e7b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestTransferLeadershipShell.java @@ -66,7 +66,7 @@ public void init() throws Exception { conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setSCMServiceId(scmServiceId).setNumOfOzoneManagers(numOfOMs) .setNumOfStorageContainerManagers(numOfSCMs) From 384103a2ddb636201c076597d18a44f23c2cc1db Mon Sep 17 00:00:00 2001 From: Zita Dombi <50611074+dombizita@users.noreply.github.com> Date: Thu, 29 Feb 2024 17:30:21 +0100 Subject: [PATCH 084/108] HDDS-10282. Fix pagination on the OM DB Insights page in Recon (#6190) --- .../webapps/recon/ozone-recon-web/api/db.json | 180 ++------ .../recon/ozone-recon-web/api/routes.json | 33 +- .../src/views/insights/om/om.less | 17 +- .../src/views/insights/om/om.tsx | 384 +++++------------- 4 files changed, 158 insertions(+), 456 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json index e4ed0ac048e9..283430a452b3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json @@ -1036,7 +1036,7 @@ ] }, "keys": { - "totalCount": 534, + "totalCount": 15, "keys": [ { "Volume": "vol-0-20448", @@ -1098,7 +1098,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77506", "DataSize": 10240, "Versions": [ 0 @@ -1117,7 +1117,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64512", "DataSize": 5692407, "Versions": [ 0 @@ -1136,7 +1136,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69105", "DataSize": 189407, "Versions": [ 0 @@ -1155,7 +1155,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77507", "DataSize": 10240, "Versions": [ 0 @@ -1174,7 +1174,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64513", "DataSize": 5692407, "Versions": [ 0 @@ -1193,7 +1193,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69106", "DataSize": 189407, "Versions": [ 0 @@ -1212,7 +1212,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77508", "DataSize": 10240, "Versions": [ 0 @@ -1231,7 +1231,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64514", "DataSize": 5692407, "Versions": [ 0 @@ -1250,7 +1250,7 @@ { "Volume": 
"vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69107", "DataSize": 189407, "Versions": [ 0 @@ -1269,7 +1269,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-0-77505", + "Key": "key-0-77509", "DataSize": 10240, "Versions": [ 0 @@ -1288,7 +1288,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-21-64511", + "Key": "key-21-64515", "DataSize": 5692407, "Versions": [ 0 @@ -1307,7 +1307,7 @@ { "Volume": "vol-0-20448", "Bucket": "bucket-0-12811", - "Key": "key-22-69104", + "Key": "key-22-69109", "DataSize": 189407, "Versions": [ 0 @@ -3765,7 +3765,6 @@ "totalDeletedKeys": 3 }, "omMismatch":{ - "lastKey":11, "containerDiscrepancyInfo": [ { "containerId": 1, @@ -3960,12 +3959,7 @@ } ], "existsAt": "SCM" - } - ] - }, - "omMismatch1":{ - "lastKey":21, - "containerDiscrepancyInfo": [ + }, { "containerId": 11, "numberOfKeys": 1, @@ -4198,12 +4192,7 @@ } ] }, - "omMismatch2":{ - "lastKey": null, - "containerDiscrepancyInfo": [] - }, "scmMismatch":{ - "lastKey":11, "containerDiscrepancyInfo": [ { "containerId": 1, @@ -4398,12 +4387,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch1":{ - "lastKey":21, - "containerDiscrepancyInfo": [ + }, { "containerId": 11, "numberOfKeys": 1, @@ -4597,12 +4581,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch2":{ - "lastKey":31, - "containerDiscrepancyInfo": [ + }, { "containerId": 21, "numberOfKeys": 1, @@ -4796,12 +4775,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch3":{ - "lastKey":41, - "containerDiscrepancyInfo": [ + }, { "containerId": 31, "numberOfKeys": 1, @@ -4995,12 +4969,7 @@ } ], "existsAt": "OM" - } - ] - }, - "scmMismatch4":{ - "lastKey":51, - "containerDiscrepancyInfo": [ + }, { "containerId": 41, "numberOfKeys": 1, @@ -5197,12 +5166,7 @@ } ] }, - "scmMismatch5":{ - "lastKey": null, - "containerDiscrepancyInfo": [] - }, "nonFSO": { - "lastKey": "11", "keysSummary": { "totalUnreplicatedDataSize": 10485760, "totalReplicatedDataSize": 31457280, @@ -5232,13 +5196,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "nonFSO1": { - "lastKey": "21", - "nonFSO": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191713", "path": "nonfso 11", @@ -5262,13 +5220,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "nonFSO2": { - "lastKey": "31", - "nonFSO": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2440/110569623850191713", "path": "nonfso 21", @@ -5296,19 +5248,7 @@ ], "status": "OK" }, - "nonFSO3": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, - "lastKey": "", - "replicatedDataSize": 0, - "unreplicatedDataSize": 0, - "status": "OK" - }, "fso": { - "lastKey": "11", "fso": [ { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2401/110569623850191713", @@ -5436,14 +5376,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - - } - ], - "status": "OK" - }, - "fso1": { - "lastKey": "21", - "fso": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191713", "path": "11", @@ -5551,13 +5484,7 @@ "requiredNodes": 1, "replicationType": "RATIS" } - } - ], - "status": "OK" - }, - "fso2": { - "lastKey": "31", - "fso": [ + }, { "key": "/-9223372036854775552/-9223372036854775040/-9223372036852420095/2411/110569623850191713", "path": "21", @@ -5681,19 +5608,7 @@ ], "status": "OK" 
}, - "fso3": { - "keysSummary": { - "totalUnreplicatedDataSize": 10485760, - "totalReplicatedDataSize": 31457280, - "totalOpenKeys": 10 - }, - "lastKey": "", - "replicatedDataSize": 0, - "unreplicatedDataSize": 0, - "status": "OK" - }, "keydeletePending":{ - "lastKey": "/volume/bucket1/rcmeevblsf/106/-9223372036843950335", "keysSummary": { "totalUnreplicatedDataSize": 29291, "totalReplicatedDataSize": 87873, @@ -6051,16 +5966,7 @@ "updateIDset": true } ] - } - ], - "status": "OK" - }, - - "keydeletePending1": { - "lastKey":"/-9223372036854775552/-9223372036854775040/-9223372036852420095/2421/110569623850191", - "replicatedTotal": -1530804718628866300, - "unreplicatedTotal": -1530804718628866300, - "deletedKeyInfo": [ + }, { "omKeyInfoList": [ { @@ -6117,15 +6023,7 @@ "updateIDset": false } ] - } - ], - "status": "OK" - }, - "keydeletePending2": { - "lastKey":"31", - "replicatedTotal": -1530804718628866300, - "unreplicatedTotal": -1530804718628866300, - "deletedKeyInfo": [ + }, { "omKeyInfoList": [ { @@ -6241,12 +6139,7 @@ ], "status": "OK" }, - "keydeletePending3": { - "lastKey":"", - "deletedKeyInfo": [] - }, "deleted": { - "lastKey": "11", "containers": [ { "containerId": 1, @@ -6417,12 +6310,7 @@ "healthy": true } ] - } - ] - }, - "deleted1": { - "lastKey": "21", - "containers": [ + }, { "containerId": 11, "numberOfKeys": 2, @@ -6592,12 +6480,7 @@ "healthy": true } ] - } - ] - }, - "deleted2": { - "lastKey": "31", - "containers": [ + }, { "containerId": 21, "numberOfKeys": 2, @@ -6767,12 +6650,7 @@ "healthy": true } ] - } - ] - }, - "deleted3": { - "lastKey": "41", - "containers": [ + }, { "containerId": 31, "numberOfKeys": 2, @@ -6808,9 +6686,5 @@ ] } ] - }, - "deleted4": { - "lastKey": null, - "containers": [] } } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json index 0bf0c69f5459..4963b9ec9472 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/routes.json @@ -37,39 +37,16 @@ "/keys/open/summary": "/keysOpenSummary", "/keys/deletePending/summary": "/keysdeletePendingSummary", - "/containers/mismatch?limit=*&prevKey=11&missingIn=OM" : "/omMismatch1", - "/containers/mismatch?limit=*&prevKey=21&missingIn=OM" : "/omMismatch2", - "/containers/mismatch?limit=*&prevKey=31&missingIn=OM" : "/omMismatch3", - "/containers/mismatch?limit=*&prevKey=41&missingIn=OM" : "/omMismatch4", + "/containers/mismatch?&missingIn=OM" : "/omMismatch", + "/containers/mismatch?limit=*&missingIn=OM" : "/omMismatch", - "/containers/mismatch?limit=*&prevKey=*&missingIn=OM" : "/omMismatch", + "/containers/mismatch?&missingIn=SCM" : "/scmMismatch", + "/containers/mismatch?limit=*&missingIn=SCM" : "/scmMismatch", - "/containers/mismatch?limit=*&prevKey=11&missingIn=SCM" : "/scmMismatch1", - "/containers/mismatch?limit=*&prevKey=21&missingIn=SCM" : "/scmMismatch2", - "/containers/mismatch?limit=*&prevKey=31&missingIn=SCM" : "/scmMismatch3", - "/containers/mismatch?limit=*&prevKey=41&missingIn=SCM" : "/scmMismatch4", - "/containers/mismatch?limit=*&prevKey=51&missingIn=SCM" : "/scmMismatch5", - - "/containers/mismatch?limit=*&prevKey=*&missingIn=SCM" : "/scmMismatch", - - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=11": "/nonFSO1", - "/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=21": "/nonFSO2", - 
"/keys/open?includeFso=false&includeNonFso=true&limit=*&prevKey=31": "/nonFSO3", "/keys/open?includeFso=false&includeNonFso=true&limit=*": "/nonFSO", - - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=11": "/fso1", - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=21": "/fso2", - "/keys/open?includeFso=true&includeNonFso=false&limit=*&prevKey=31": "/fso3", "/keys/open?includeFso=true&includeNonFso=false&limit=*": "/fso", - "/keys/deletePending?limit=*&prevKey=/volume/bucket1/rcmeevblsf/106/-9223372036843950335" : "/keydeletePending1", - "/keys/deletePending?limit=*&prevKey=/-9223372036854775552/-9223372036854775040/-9223372036852420095/2421/110569623850191" : "/keydeletePending2", - "/keys/deletePending?limit=*&prevKey=31" : "/keydeletePending3", "/keys/deletePending?limit=*" : "/keydeletePending", - "/containers/mismatch/deleted?limit=*&prevKey": "/deleted", - "/containers/mismatch/deleted?limit=*&prevKey=11": "/deleted1", - "/containers/mismatch/deleted?limit=*&prevKey=21": "/deleted2", - "/containers/mismatch/deleted?limit=*&prevKey=31": "/deleted3", - "/containers/mismatch/deleted?limit=*&prevKey=41": "/deleted4" + "/containers/mismatch/deleted?limit=*": "/deleted" } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less index 15d68dfc8600..a2f4c088c566 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.less @@ -24,4 +24,19 @@ .ant-pagination-disabled, .ant-pagination-disabled:hover, .ant-pagination-disabled:focus { color: rgba(0, 0, 0, 0.65); cursor: pointer !important; - } \ No newline at end of file + } + +.multi-select-container { + padding-left: 5px; + margin-right: 5px; + display: inline-block; + min-width: 200px; + z-index: 99; +} + +.limit-block { + font-size: 14px; + font-weight: normal; + display: inline-block; + margin-left: 20px; +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx index 1846592b8995..6836eb86e390 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/insights/om/om.tsx @@ -24,17 +24,14 @@ import moment from 'moment'; import { showDataFetchError, byteToSize } from 'utils/common'; import './om.less'; import { ColumnSearch } from 'utils/columnSearch'; -import { Link } from 'react-router-dom'; import { AxiosGetHelper, cancelRequests } from 'utils/axiosRequestHelper'; +import {IOption} from "../../../components/multiSelect/multiSelect"; +import {ActionMeta, ValueType} from "react-select"; +import CreatableSelect from "react-select/creatable"; const size = filesize.partial({ standard: 'iec' }); const { TabPane } = Tabs; -//Previous Key Need to store respective Lastkey of each API -let mismatchPrevKeyList = [0]; -let openPrevKeyList =[""]; -let keysPendingPrevList =[""]; -let deletedKeysPrevList =[0]; let keysPendingExpanded: any = []; interface IContainerResponse { containerId: number; @@ -166,7 +163,7 @@ const MISMATCH_TAB_COLUMNS = [ const OPEN_KEY_TAB_COLUMNS = [ { - 
title: 'Key', + title: 'Key Name', dataIndex: 'path', key: 'path', isSearchable: true @@ -295,19 +292,22 @@ interface IOmdbInsightsState { pendingDeleteKeyDataSource: any[]; expandedRowData: IExpandedRow; deletedContainerKeysDataSource: []; - prevKeyMismatch: number; mismatchMissingState: any; - prevKeyOpen: string; - prevKeyDeleted: number; - prevKeyDeletePending: string; activeTab: string; - DEFAULT_LIMIT: number, - nextClickable: boolean; includeFso: boolean; includeNonFso: boolean; - prevClickable: boolean + selectedLimit: IOption; } +const LIMIT_OPTIONS: IOption[] = [ + {label: "1000", value: "1000"}, + {label: "5000", value: "5000"}, + {label: "10000", value: "10000"}, + {label: "20000", value: "20000"} +] + +const INITIAL_LIMIT_OPTION = LIMIT_OPTIONS[0] + let cancelMismatchedEndpointSignal: AbortController; let cancelOpenKeysSignal: AbortController; let cancelDeletePendingSignal: AbortController; @@ -326,18 +326,12 @@ export class Om extends React.Component, IOmdbInsightsSta openKeysDataSource: [], pendingDeleteKeyDataSource: [], deletedContainerKeysDataSource: [], - prevKeyMismatch: 0, mismatchMissingState: 'SCM', - prevKeyOpen: "", - prevKeyDeletePending: "", - prevKeyDeleted: 0, expandedRowData: {}, activeTab: props.location.state ? props.location.state.activeTab : '1', - DEFAULT_LIMIT: 10, - nextClickable: true, includeFso: true, includeNonFso: false, - prevClickable: false + selectedLimit: INITIAL_LIMIT_OPTION }; } @@ -389,12 +383,10 @@ export class Om extends React.Component, IOmdbInsightsSta handleExistsAtChange = (e: any) => { console.log("handleExistsAtChange", e.key); if (e.key === 'OM') { - mismatchPrevKeyList = [0]; - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, 0, 'SCM'); + this.fetchMismatchContainers('SCM'); } else { - mismatchPrevKeyList = [0]; - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, 0, 'OM'); + this.fetchMismatchContainers('OM'); } }; @@ -439,26 +431,28 @@ export class Om extends React.Component, IOmdbInsightsSta handlefsoNonfsoMenuChange = (e: any) => { if (e.key === 'fso') { - openPrevKeyList =[""]; - this.fetchOpenKeys(true, false, this.state.DEFAULT_LIMIT, ""); + this.fetchOpenKeys(true, false); } else { - openPrevKeyList = [""]; - this.fetchOpenKeys(false, true, this.state.DEFAULT_LIMIT, ""); + this.fetchOpenKeys(false, true); } }; - componentDidMount(): void { + _loadData = () => { if (this.state.activeTab === '1') { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); + this.fetchMismatchContainers(this.state.mismatchMissingState); } else if (this.state.activeTab === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); + this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso); } else if (this.state.activeTab === '3') { keysPendingExpanded =[]; - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); + this.fetchDeletePendingKeys(); } else if (this.state.activeTab === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); + this.fetchDeletedKeys(); } + } + + componentDidMount(): void { + this._loadData(); }; componentWillUnmount(): void { @@ -469,11 +463,9 @@ export class Om extends React.Component, IOmdbInsightsSta cancelRowExpandSignal && cancelRowExpandSignal.abort(); } - fetchMismatchContainers = (limit: number, prevKeyMismatch: number, mismatchMissingState: any) => { + fetchMismatchContainers = (mismatchMissingState: 
any) => { this.setState({ loading: true, - nextClickable: true, - prevClickable: true, mismatchMissingState }); @@ -486,36 +478,17 @@ export class Om extends React.Component, IOmdbInsightsSta cancelRowExpandSignal ]); - const mismatchEndpoint = `/api/v1/containers/mismatch?limit=${limit}&prevKey=${prevKeyMismatch}&missingIn=${mismatchMissingState}` + const mismatchEndpoint = `/api/v1/containers/mismatch?limit=${this.state.selectedLimit.value}&missingIn=${mismatchMissingState}` const { request, controller } = AxiosGetHelper(mismatchEndpoint, cancelMismatchedEndpointSignal) cancelMismatchedEndpointSignal = controller; request.then(mismatchContainersResponse => { const mismatchContainers: IContainerResponse[] = mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.containerDiscrepancyInfo; - if (mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.lastKey === null) { - //No Further Records may be last record - mismatchPrevKeyList = [0]; - this.setState({ - loading: false, - nextClickable: false, - mismatchDataSource: mismatchContainers, - expandedRowData: {}, - }) - } - else { - if (this.state.prevKeyMismatch === 0 ){ - this.setState({ - prevClickable: false - }) - } - if (mismatchPrevKeyList.includes(mismatchContainersResponse.data.lastKey) === false) { - mismatchPrevKeyList.push(mismatchContainersResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyMismatch: mismatchContainersResponse && mismatchContainersResponse.data && mismatchContainersResponse.data.lastKey, - mismatchDataSource: mismatchContainers, - }); - } + + this.setState({ + loading: false, + mismatchDataSource: mismatchContainers + }); + }).catch(error => { this.setState({ loading: false, @@ -524,11 +497,9 @@ export class Om extends React.Component, IOmdbInsightsSta }); }; - fetchOpenKeys = (includeFso: boolean, includeNonFso: boolean, limit: number, prevKeyOpen: string) => { + fetchOpenKeys = (includeFso: boolean, includeNonFso: boolean) => { this.setState({ loading: true, - nextClickable: true, - prevClickable: true, includeFso, includeNonFso }); @@ -542,13 +513,7 @@ export class Om extends React.Component, IOmdbInsightsSta cancelRowExpandSignal ]); - let openKeysEndpoint; - if (prevKeyOpen === "") { - openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}`; - } - else { - openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}&prevKey=${prevKeyOpen}`; - } + let openKeysEndpoint = `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(openKeysEndpoint, cancelOpenKeysSignal) cancelOpenKeysSignal = controller @@ -560,31 +525,11 @@ export class Om extends React.Component, IOmdbInsightsSta openKeys[key] && openKeys[key].map((item: any) => (allopenKeysResponse.push({ ...item, type: key }))); } } + this.setState({ + loading: false, + openKeysDataSource: allopenKeysResponse, + }) - if (openKeysResponse && openKeysResponse.data && openKeysResponse.data.lastKey === "") { - //last key of api is null may be last record no further records - openPrevKeyList = [""]; - this.setState({ - loading: false, - nextClickable: false, - openKeysDataSource: allopenKeysResponse - }) - } - else { - if (this.state.prevKeyOpen === "" ){ - this.setState({ - prevClickable: false - }) - } - if 
(openPrevKeyList.includes(openKeysResponse.data.lastKey) === false) { - openPrevKeyList.push(openKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyOpen: openKeysResponse && openKeysResponse.data && openKeysResponse.data.lastKey, - openKeysDataSource: allopenKeysResponse, - }) - }; }).catch(error => { this.setState({ loading: false @@ -594,11 +539,9 @@ export class Om extends React.Component, IOmdbInsightsSta }; - fetchDeletePendingKeys = (limit: number, prevKeyDeletePending: string) => { + fetchDeletePendingKeys = () => { this.setState({ - loading: true, - nextClickable: true, - prevClickable :true + loading: true }); //Cancel any previous pending request @@ -611,13 +554,7 @@ export class Om extends React.Component, IOmdbInsightsSta ]); keysPendingExpanded =[]; - let deletePendingKeysEndpoint; - if (prevKeyDeletePending === "" || prevKeyDeletePending === undefined ) { - deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${limit}`; - } - else { - deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${limit}&prevKey=${prevKeyDeletePending}`; - } + let deletePendingKeysEndpoint = `/api/v1/keys/deletePending?limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(deletePendingKeysEndpoint, cancelDeletePendingSignal); cancelDeletePendingSignal = controller; @@ -646,30 +583,11 @@ export class Om extends React.Component, IOmdbInsightsSta } }); - if (deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.lastKey === "") { - //last key of api is empty may be last record no further records - keysPendingPrevList =[""]; - this.setState({ - loading: false, - nextClickable: false, - pendingDeleteKeyDataSource: deletedKeyInfoData - }) - } - else { - if (this.state.prevKeyDeletePending === "" ||this.state.prevKeyDeletePending === undefined ){ - this.setState({ - prevClickable: false - }) - } - if (keysPendingPrevList.includes(deletePendingKeysResponse.data.lastKey) === false) { - keysPendingPrevList.push(deletePendingKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyDeletePending: deletePendingKeysResponse && deletePendingKeysResponse.data && deletePendingKeysResponse.data.lastKey, - pendingDeleteKeyDataSource: deletedKeyInfoData - }); - } + this.setState({ + loading: false, + pendingDeleteKeyDataSource: deletedKeyInfoData + }); + }).catch(error => { this.setState({ loading: false, @@ -722,11 +640,9 @@ export class Om extends React.Component, IOmdbInsightsSta ); } - fetchDeletedKeys = (limit: number, prevKeyDeleted: number) => { + fetchDeletedKeys = () => { this.setState({ - loading: true, - nextClickable: true, - prevClickable: true + loading: true }); //Cancel any previous pending request @@ -738,37 +654,16 @@ export class Om extends React.Component, IOmdbInsightsSta cancelRowExpandSignal ]); - const deletedKeysEndpoint = `/api/v1/containers/mismatch/deleted?limit=${limit}&prevKey=${prevKeyDeleted}`; + const deletedKeysEndpoint = `/api/v1/containers/mismatch/deleted?limit=${this.state.selectedLimit.value}`; const { request, controller } = AxiosGetHelper(deletedKeysEndpoint, cancelDeletedKeysSignal); cancelDeletedKeysSignal = controller request.then(deletedKeysResponse => { let deletedContainerKeys = []; deletedContainerKeys = deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.containers; - if (deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.lastKey === null) { - // no more further records last key - 
deletedKeysPrevList = [0]; - this.setState({ - loading: false, - nextClickable: false, - deletedContainerKeysDataSource: deletedContainerKeys, - expandedRowData: {}, - }) - } - else { - if (this.state.prevKeyDeleted === 0 ){ - this.setState({ - prevClickable: false - }) - } - if (deletedKeysPrevList.includes(deletedKeysResponse.data.lastKey) === false) { - deletedKeysPrevList.push(deletedKeysResponse.data.lastKey); - } - this.setState({ - loading: false, - prevKeyDeleted: deletedKeysResponse && deletedKeysResponse.data && deletedKeysResponse.data.lastKey, - deletedContainerKeysDataSource: deletedContainerKeys - }) - }; + this.setState({ + loading: false, + deletedContainerKeysDataSource: deletedContainerKeys + }) }).catch(error => { this.setState({ loading: false @@ -778,11 +673,6 @@ export class Om extends React.Component, IOmdbInsightsSta }; changeTab = (activeKey: any) => { - //when changing tab make empty all datasets and prevkey and deafult filtering to intial values also cancel all pending requests - mismatchPrevKeyList = [0]; - openPrevKeyList =[""]; - keysPendingPrevList =[""]; - deletedKeysPrevList =[0]; this.setState({ activeTab: activeKey, mismatchDataSource: [], @@ -790,121 +680,27 @@ export class Om extends React.Component, IOmdbInsightsSta pendingDeleteKeyDataSource: [], deletedContainerKeysDataSource: [], expandedRowData: {}, - prevKeyOpen: "", - prevKeyDeletePending: "", - prevKeyDeleted: 0, - prevKeyMismatch: 0, mismatchMissingState: 'SCM', includeFso: true, includeNonFso: false, - DEFAULT_LIMIT: 10, - + selectedLimit: INITIAL_LIMIT_OPTION }, () => { if (activeKey === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); + this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso); } else if (activeKey === '3') { keysPendingExpanded =[]; - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); + this.fetchDeletePendingKeys(); } else if (activeKey === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); + this.fetchDeletedKeys(); } else { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); + this.fetchMismatchContainers(this.state.mismatchMissingState); } }) }; - fetchPreviousRecords = () => { - // to fetch previous call stored all prevkey in array and fetching in respective tabs - if (this.state.activeTab === '2') { - this.setState({ - prevKeyOpen: openPrevKeyList[openPrevKeyList.indexOf(this.state.prevKeyOpen)-2] - }, () => { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT,this.state.prevKeyOpen); - }) - } else if (this.state.activeTab === '3') { - this.setState({ - prevKeyDeletePending: keysPendingPrevList[keysPendingPrevList.indexOf(this.state.prevKeyDeletePending)-2] - }, () => { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - }) - } else if (this.state.activeTab === '4') { - this.setState({ - prevKeyDeleted: deletedKeysPrevList[deletedKeysPrevList.indexOf(this.state.prevKeyDeleted)-2] - }, () => { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT,this.state.prevKeyDeleted); - }) - } - else { - this.setState({ - prevKeyMismatch: mismatchPrevKeyList[mismatchPrevKeyList.indexOf(this.state.prevKeyMismatch)-2] - }, () => { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT,this.state.prevKeyMismatch, this.state.mismatchMissingState); - }) - } - }; - - fetchNextRecords 
= () => { - // To Call API for Page Level for each page fetch next records - if (this.state.activeTab === '2') { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT, this.state.prevKeyOpen); - } else if (this.state.activeTab === '3') { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - } else if (this.state.activeTab === '4') { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); - } - else { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT, this.state.prevKeyMismatch, this.state.mismatchMissingState); - } - }; - - itemRender = (_: any, type: string, originalElement: any) => { - if (type === 'prev') { - return

{this.state.prevClickable ? Prev: No Records}
; - } - if (type === 'next') { - return
{this.state.nextClickable ? {'>>'} : No More Further Records}
; - } - return originalElement; - }; - onShowSizeChange = (current: number, pageSize: number) => { - if (this.state.activeTab === '2') { - //open keys - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyOpen: openPrevKeyList[openPrevKeyList.indexOf(this.state.prevKeyOpen)-1] - }, () => { - this.fetchOpenKeys(this.state.includeFso, this.state.includeNonFso, this.state.DEFAULT_LIMIT,this.state.prevKeyOpen); - }); - } - else if (this.state.activeTab === '3') { - //keys pending for deletion - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyDeletePending: keysPendingPrevList[keysPendingPrevList.indexOf(this.state.prevKeyDeletePending)-1] - }, () => { - this.fetchDeletePendingKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeletePending); - }) - } - else if (this.state.activeTab === '4') { - //deleted container keys - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyDeleted: deletedKeysPrevList[deletedKeysPrevList.indexOf(this.state.prevKeyDeleted)-1] - }, () => { - this.fetchDeletedKeys(this.state.DEFAULT_LIMIT, this.state.prevKeyDeleted); - }) - } - else { - // active tab 1 for mismatch - this.setState({ - DEFAULT_LIMIT: pageSize, - prevKeyMismatch: mismatchPrevKeyList[mismatchPrevKeyList.indexOf(this.state.prevKeyMismatch)-1] - }, () => { - this.fetchMismatchContainers(this.state.DEFAULT_LIMIT,this.state.prevKeyMismatch, this.state.mismatchMissingState); - }); - } + console.log(current, pageSize); }; onRowExpandClick = (expanded: boolean, record: IContainerResponse) => { @@ -1039,16 +835,35 @@ export class Om extends React.Component, IOmdbInsightsSta }, []) }; + _handleLimitChange = (selected: ValueType, _action: ActionMeta) => { + const selectedLimit = (selected as IOption) + this.setState({ + selectedLimit + }, this._loadData); + } + + _onCreateOption = (created: string) => { + // Check that it's a numeric and non-negative + if (parseInt(created)) { + const createdOption: IOption = { + label: created, + value: created + } + this.setState({ + selectedLimit: createdOption + }, this._loadData); + } else { + console.log('Not a valid option') + } + } + render() { - const { mismatchDataSource, loading, openKeysDataSource, pendingDeleteKeyDataSource, deletedContainerKeysDataSource } = this.state; + const { mismatchDataSource, loading, openKeysDataSource, pendingDeleteKeyDataSource, deletedContainerKeysDataSource, selectedLimit } = this.state; const paginationConfig: PaginationConfig = { - pageSize:this.state.DEFAULT_LIMIT, - defaultPageSize: this.state.DEFAULT_LIMIT, - pageSizeOptions: ['10', '20', '30', '50'], + showTotal: (total: number, range) => `${range[0]}-${range[1]} of ${total}`, showSizeChanger: true, onShowSizeChange: this.onShowSizeChange, - itemRender: this.itemRender }; const generateMismatchTable = (dataSource: any) => { @@ -1065,7 +880,7 @@ export class Om extends React.Component, IOmdbInsightsSta return } @@ -1093,11 +908,32 @@ export class Om extends React.Component, IOmdbInsightsSta return ( -
+
OM DB Insights
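Immediately below this header the patch adds a creatable limit dropdown; the value it produces is stored in this.state.selectedLimit and is the only pagination input left, since every fetch method above now interpolates that limit into its endpoint with no prevKey cursor. A condensed sketch of the resulting request URLs follows; buildInsightEndpoints is a hypothetical helper introduced only for illustration, while the URL shapes and the 'SCM'/includeFso defaults are taken from the fetch methods in this patch.

    // Illustrative sketch, not code from the patch: every insights endpoint
    // is now parameterised by the selected limit alone.
    const buildInsightEndpoints = (
      limit: string,
      mismatchMissingState: string,
      includeFso: boolean,
      includeNonFso: boolean
    ) => ({
      mismatch: `/api/v1/containers/mismatch?limit=${limit}&missingIn=${mismatchMissingState}`,
      openKeys: `/api/v1/keys/open?includeFso=${includeFso}&includeNonFso=${includeNonFso}&limit=${limit}`,
      deletePending: `/api/v1/keys/deletePending?limit=${limit}`,
      deletedKeys: `/api/v1/containers/mismatch/deleted?limit=${limit}`
    });

    // e.g. buildInsightEndpoints(this.state.selectedLimit.value, 'SCM', true, false).openKeys

Changing the limit or switching tabs therefore re-issues a single bounded request and replaces the current page, instead of replaying a chain of prevKey pages.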
+
+ { + // Only number will be accepted + return !isNaN(parseInt(input)) + }} + options={LIMIT_OPTIONS} + hideSelectedOptions={false} + value={selectedLimit} + createOptionPosition='last' + formatCreateLabel={(input) => { + return `new limit... ${input}` + }} + /> Limit +
{generateMismatchTable(mismatchDataSource)} From 0a5fc695b4c860e4aba936fec6a9c3bd95c8d971 Mon Sep 17 00:00:00 2001 From: Arafat2198 <98023601+ArafatKhan2198@users.noreply.github.com> Date: Fri, 1 Mar 2024 11:17:22 +0530 Subject: [PATCH 085/108] HDDS-7810. Support namespace summaries (du, dist & counts) for OBJECT_STORE buckets. (#4245) --- .../recon/api/handlers/BucketHandler.java | 4 +- .../recon/api/handlers/FSOBucketHandler.java | 2 +- .../api/handlers/LegacyBucketHandler.java | 2 +- .../recon/api/handlers/OBSBucketHandler.java | 268 ++++ .../ozone/recon/tasks/NSSummaryTask.java | 41 +- .../recon/tasks/NSSummaryTaskWithOBS.java | 236 ++++ .../api/TestNSSummaryEndpointWithOBS.java | 1127 +++++++++++++++++ .../ozone/recon/tasks/TestNSSummaryTask.java | 4 +- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 548 ++++++++ 9 files changed, 2216 insertions(+), 16 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java index 09cbf4fe4e40..34dcba40f81b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/BucketHandler.java @@ -178,9 +178,7 @@ public static BucketHandler getBucketHandler( omMetadataManager, reconSCM, bucketInfo); } else if (bucketInfo.getBucketLayout() .equals(BucketLayout.OBJECT_STORE)) { - // TODO: HDDS-7810 Write a handler for object store bucket - // We can use LegacyBucketHandler for OBS bucket for now. - return new LegacyBucketHandler(reconNamespaceSummaryManager, + return new OBSBucketHandler(reconNamespaceSummaryManager, omMetadataManager, reconSCM, bucketInfo); } else { LOG.error("Unsupported bucket layout: " + diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java index 26cda6442d4e..8a1c5babe75e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/FSOBucketHandler.java @@ -42,7 +42,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling FSO buckets. + * Class for handling FSO buckets NameSpaceSummaries. 
*/ public class FSOBucketHandler extends BucketHandler { private static final Logger LOG = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java index 3dd1ddbdabb9..09f1c5bc7454 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/LegacyBucketHandler.java @@ -41,7 +41,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; /** - * Class for handling Legacy buckets. + * Class for handling Legacy buckets NameSpaceSummaries. */ public class LegacyBucketHandler extends BucketHandler { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java new file mode 100644 index 000000000000..024eec989a10 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/handlers/OBSBucketHandler.java @@ -0,0 +1,268 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon.api.handlers; + + +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; + +import java.io.IOException; +import java.util.List; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +/** + * Class for handling OBS buckets NameSpaceSummaries. 
+ */ +public class OBSBucketHandler extends BucketHandler { + + private final String vol; + private final String bucket; + private final OmBucketInfo omBucketInfo; + + public OBSBucketHandler( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager omMetadataManager, + OzoneStorageContainerManager reconSCM, + OmBucketInfo bucketInfo) { + super(reconNamespaceSummaryManager, omMetadataManager, + reconSCM); + this.omBucketInfo = bucketInfo; + this.vol = omBucketInfo.getVolumeName(); + this.bucket = omBucketInfo.getBucketName(); + } + + /** + * Helper function to check if a path is a key, or invalid. + * + * @param keyName key name + * @return KEY, or UNKNOWN + * @throws IOException + */ + @Override + public EntityType determineKeyPath(String keyName) throws IOException { + String key = OM_KEY_PREFIX + vol + + OM_KEY_PREFIX + bucket + + OM_KEY_PREFIX + keyName; + + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + iterator.seek(key); + if (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + if (dbKey.equals(key)) { + return EntityType.KEY; + } + } + } + return EntityType.UNKNOWN; + } + + /** + * This method handles disk usage of direct keys. + * + * @param parentId The identifier for the parent bucket. + * @param withReplica if withReplica is enabled, set sizeWithReplica + * for each direct key's DU + * @param listFile if listFile is enabled, append key DU as a children + * keys + * @param duData the current DU data + * @param normalizedPath the normalized path request + * @return the total DU of all direct keys + * @throws IOException IOE + */ + @Override + public long handleDirectKeys(long parentId, boolean withReplica, + boolean listFile, + List duData, + String normalizedPath) throws IOException { + + NSSummary nsSummary = getReconNamespaceSummaryManager() + .getNSSummary(parentId); + // Handle the case of an empty bucket. + if (nsSummary == null) { + return 0; + } + + Table keyTable = getKeyTable(); + long keyDataSizeWithReplica = 0L; + + try ( + TableIterator> + iterator = keyTable.iterator()) { + + String seekPrefix = OM_KEY_PREFIX + + vol + + OM_KEY_PREFIX + + bucket + + OM_KEY_PREFIX; + + iterator.seek(seekPrefix); + + while (iterator.hasNext()) { + // KeyName : OmKeyInfo-Object + Table.KeyValue kv = iterator.next(); + String dbKey = kv.getKey(); + + // Exit loop if the key doesn't match the seekPrefix. + if (!dbKey.startsWith(seekPrefix)) { + break; + } + + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + DUResponse.DiskUsage diskUsage = new DUResponse.DiskUsage(); + String objectName = keyInfo.getKeyName(); + diskUsage.setSubpath(objectName); + diskUsage.setKey(true); + diskUsage.setSize(keyInfo.getDataSize()); + + if (withReplica) { + long keyDU = keyInfo.getReplicatedSize(); + keyDataSizeWithReplica += keyDU; + diskUsage.setSizeWithReplica(keyDU); + } + // List all the keys for the OBS bucket if requested. + if (listFile) { + duData.add(diskUsage); + } + } + } + } + + return keyDataSizeWithReplica; + } + + /** + * Calculates the total disk usage (DU) for an Object Store Bucket (OBS) by + * summing the sizes of all keys contained within the bucket. + * Since OBS buckets operate on a flat hierarchy, this method iterates through + * all the keys in the bucket without the need to traverse directories. + * + * @param parentId The identifier for the parent bucket. + * @return The total disk usage of all keys within the specified OBS bucket. 
+ * @throws IOException + */ + @Override + public long calculateDUUnderObject(long parentId) throws IOException { + // Initialize the total disk usage variable. + long totalDU = 0L; + + // Access the key table for the bucket. + Table keyTable = getKeyTable(); + + try ( + TableIterator> + iterator = keyTable.iterator()) { + // Construct the seek prefix to filter keys under this bucket. + String seekPrefix = + OM_KEY_PREFIX + vol + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX; + iterator.seek(seekPrefix); + + // Iterate over keys in the bucket. + while (iterator.hasNext()) { + Table.KeyValue kv = iterator.next(); + String keyName = kv.getKey(); + + // Break the loop if the current key does not start with the seekPrefix. + if (!keyName.startsWith(seekPrefix)) { + break; + } + + // Sum the size of each key to the total disk usage. + OmKeyInfo keyInfo = kv.getValue(); + if (keyInfo != null) { + totalDU += keyInfo.getDataSize(); + } + } + } + + // Return the total disk usage of all keys in the bucket. + return totalDU; + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public long getDirObjectId(String[] names) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public long getDirObjectId(String[] names, int cutoff) + throws UnsupportedOperationException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + /** + * Returns the keyInfo object from the KEY table. + * @return OmKeyInfo + */ + @Override + public OmKeyInfo getKeyInfo(String[] names) throws IOException { + String ozoneKey = OM_KEY_PREFIX; + ozoneKey += String.join(OM_KEY_PREFIX, names); + + return getKeyTable().getSkipCache(ozoneKey); + } + + /** + * Object stores do not support directories. + * + * @throws UnsupportedOperationException + */ + @Override + public OmDirectoryInfo getDirInfo(String[] names) throws IOException { + throw new UnsupportedOperationException( + "Object stores do not support directories."); + } + + public Table getKeyTable() { + return getOmMetadataManager().getKeyTable(getBucketLayout()); + } + + public BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 42356191c501..5c3395084464 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -38,6 +38,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.Future; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; /** * Task to query data from OMDB and write into Recon RocksDB. 
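One pattern is worth isolating from the OBSBucketHandler methods above: because an OBJECT_STORE bucket has a flat namespace, both handleDirectKeys and calculateDUUnderObject reduce to a single prefix-bounded scan of the key table, seeking to /volume/bucket/ and stopping at the first key that falls outside that prefix. The sketch below shows just that loop using the same Table/TableIterator API the handler uses; the class and method names here are illustrative, not part of the patch.

    import java.io.IOException;

    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.hdds.utils.db.TableIterator;
    import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

    import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;

    /** Illustrative only: sums raw key sizes under one OBS bucket's flat namespace. */
    public final class ObsPrefixScanSketch {

      public static long sumKeySizes(Table<String, OmKeyInfo> keyTable,
                                     String volume, String bucket) throws IOException {
        long total = 0L;
        // All keys of the bucket share the /volume/bucket/ prefix, so one seek
        // followed by a linear scan visits exactly the bucket's contents.
        String seekPrefix = OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket + OM_KEY_PREFIX;
        try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
                 iterator = keyTable.iterator()) {
          iterator.seek(seekPrefix);
          while (iterator.hasNext()) {
            Table.KeyValue<String, OmKeyInfo> kv = iterator.next();
            if (!kv.getKey().startsWith(seekPrefix)) {
              break; // left the bucket's key range; nothing further belongs to it
            }
            OmKeyInfo keyInfo = kv.getValue();
            if (keyInfo != null) {
              total += keyInfo.getDataSize();
            }
          }
        }
        return total;
      }

      private ObsPrefixScanSketch() {
      }
    }

The early break keeps the scan linear in the size of the bucket rather than the whole key table, and it is safe only because the key table iterates in sorted key order, so a bucket's keys are contiguous.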
@@ -68,6 +69,7 @@ public class NSSummaryTask implements ReconOmTask { private final ReconOMMetadataManager reconOMMetadataManager; private final NSSummaryTaskWithFSO nsSummaryTaskWithFSO; private final NSSummaryTaskWithLegacy nsSummaryTaskWithLegacy; + private final NSSummaryTaskWithOBS nsSummaryTaskWithOBS; private final OzoneConfiguration ozoneConfiguration; @Inject @@ -86,6 +88,9 @@ public NSSummaryTask(ReconNamespaceSummaryManager this.nsSummaryTaskWithLegacy = new NSSummaryTaskWithLegacy( reconNamespaceSummaryManager, reconOMMetadataManager, ozoneConfiguration); + this.nsSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); } @Override @@ -95,20 +100,28 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { - boolean success; - success = nsSummaryTaskWithFSO.processWithFSO(events); - if (success) { - success = nsSummaryTaskWithLegacy.processWithLegacy(events); - } else { + boolean success = nsSummaryTaskWithFSO.processWithFSO(events); + if (!success) { LOG.error("processWithFSO failed."); } + success = nsSummaryTaskWithLegacy.processWithLegacy(events); + if (!success) { + LOG.error("processWithLegacy failed."); + } + success = nsSummaryTaskWithOBS.processWithOBS(events); + if (!success) { + LOG.error("processWithOBS failed."); + } return new ImmutablePair<>(getTaskName(), success); } @Override public Pair reprocess(OMMetadataManager omMetadataManager) { + // Initialize a list of tasks to run in parallel Collection> tasks = new ArrayList<>(); + long startTime = System.nanoTime(); // Record start time + try { // reinit Recon RocksDB's namespace CF. reconNamespaceSummaryManager.clearNSSummaryTable(); @@ -122,6 +135,8 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { .reprocessWithFSO(omMetadataManager)); tasks.add(() -> nsSummaryTaskWithLegacy .reprocessWithLegacy(reconOMMetadataManager)); + tasks.add(() -> nsSummaryTaskWithOBS + .reprocessWithOBS(reconOMMetadataManager)); List> results; ThreadFactory threadFactory = new ThreadFactoryBuilder() @@ -137,17 +152,25 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { } } } catch (InterruptedException ex) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex); return new ImmutablePair<>(getTaskName(), false); } catch (ExecutionException ex2) { - LOG.error("Error while reprocessing NSSummary " + - "table in Recon DB. ", ex2); + LOG.error("Error while reprocessing NSSummary table in Recon DB.", ex2); return new ImmutablePair<>(getTaskName(), false); } finally { executorService.shutdown(); + + long endTime = System.nanoTime(); + // Convert to milliseconds + long durationInMillis = + TimeUnit.NANOSECONDS.toMillis(endTime - startTime); + + // Log performance metrics + LOG.info("Task execution time: {} milliseconds", durationInMillis); } + return new ImmutablePair<>(getTaskName(), true); } + } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..34c7dc967c3a --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTaskWithOBS.java @@ -0,0 +1,236 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE; + + +/** + * Class for handling OBS specific tasks. + */ +public class NSSummaryTaskWithOBS extends NSSummaryTaskDbEventHandler { + + private static final BucketLayout BUCKET_LAYOUT = BucketLayout.OBJECT_STORE; + + private static final Logger LOG = + LoggerFactory.getLogger(NSSummaryTaskWithOBS.class); + + + public NSSummaryTaskWithOBS( + ReconNamespaceSummaryManager reconNamespaceSummaryManager, + ReconOMMetadataManager reconOMMetadataManager, + OzoneConfiguration ozoneConfiguration) { + super(reconNamespaceSummaryManager, + reconOMMetadataManager, ozoneConfiguration); + } + + + public boolean reprocessWithOBS(OMMetadataManager omMetadataManager) { + Map nsSummaryMap = new HashMap<>(); + + try { + Table keyTable = + omMetadataManager.getKeyTable(BUCKET_LAYOUT); + + try (TableIterator> + keyTableIter = keyTable.iterator()) { + + while (keyTableIter.hasNext()) { + Table.KeyValue kv = keyTableIter.next(); + OmKeyInfo keyInfo = kv.getValue(); + + // KeyTable entries belong to both Legacy and OBS buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = keyInfo.getVolumeName(); + String bucketName = keyInfo.getBucketName(); + String bucketDBKey = omMetadataManager + .getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = omMetadataManager + .getBucketTable().getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + continue; + } + + setKeyParentID(keyInfo); + + handlePutKeyEvent(keyInfo, nsSummaryMap); + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + } + } catch (IOException ioEx) { + LOG.error("Unable to reprocess Namespace Summary data in Recon DB. 
", + ioEx); + return false; + } + + // flush and commit left out entries at end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + LOG.info("Completed a reprocess run of NSSummaryTaskWithOBS"); + return true; + } + + public boolean processWithOBS(OMUpdateEventBatch events) { + Iterator eventIterator = events.getIterator(); + Map nsSummaryMap = new HashMap<>(); + + while (eventIterator.hasNext()) { + OMDBUpdateEvent omdbUpdateEvent = + eventIterator.next(); + OMDBUpdateEvent.OMDBUpdateAction action = omdbUpdateEvent.getAction(); + + // We only process updates on OM's KeyTable + String table = omdbUpdateEvent.getTable(); + boolean updateOnKeyTable = table.equals(KEY_TABLE); + if (!updateOnKeyTable) { + continue; + } + + String updatedKey = omdbUpdateEvent.getKey(); + + try { + OMDBUpdateEvent keyTableUpdateEvent = omdbUpdateEvent; + Object value = keyTableUpdateEvent.getValue(); + Object oldValue = keyTableUpdateEvent.getOldValue(); + if (value == null) { + LOG.warn("Value is null for key {}. Skipping processing.", + updatedKey); + continue; + } else if (!(value instanceof OmKeyInfo)) { + LOG.warn("Unexpected value type {} for key {}. Skipping processing.", + value.getClass().getName(), updatedKey); + continue; + } + + OmKeyInfo updatedKeyInfo = (OmKeyInfo) value; + OmKeyInfo oldKeyInfo = (OmKeyInfo) oldValue; + + // KeyTable entries belong to both OBS and Legacy buckets. + // Check bucket layout and if it's anything other than OBS, + // continue to the next iteration. + String volumeName = updatedKeyInfo.getVolumeName(); + String bucketName = updatedKeyInfo.getBucketName(); + String bucketDBKey = + getReconOMMetadataManager().getBucketKey(volumeName, bucketName); + // Get bucket info from bucket table + OmBucketInfo omBucketInfo = getReconOMMetadataManager().getBucketTable() + .getSkipCache(bucketDBKey); + + if (omBucketInfo.getBucketLayout() != BUCKET_LAYOUT) { + continue; + } + + setKeyParentID(updatedKeyInfo); + + switch (action) { + case PUT: + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case DELETE: + handleDeleteKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + case UPDATE: + if (oldKeyInfo != null) { + // delete first, then put + setKeyParentID(oldKeyInfo); + handleDeleteKeyEvent(oldKeyInfo, nsSummaryMap); + } else { + LOG.warn("Update event does not have the old keyInfo for {}.", + updatedKey); + } + handlePutKeyEvent(updatedKeyInfo, nsSummaryMap); + break; + default: + LOG.debug("Skipping DB update event: {}", action); + } + + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } catch (IOException ioEx) { + LOG.error("Unable to process Namespace Summary data in Recon DB. ", + ioEx); + return false; + } + if (!checkAndCallFlushToDB(nsSummaryMap)) { + return false; + } + } + + // Flush and commit left-out entries at the end + if (!flushAndCommitNSToDB(nsSummaryMap)) { + return false; + } + + LOG.info("Completed a process run of NSSummaryTaskWithOBS"); + return true; + } + + + /** + * KeyTable entries don't have the parentId set. + * In order to reuse the existing methods that rely on + * the parentId, we have to set it explicitly. + * Note: For an OBS key, the parentId will always correspond to the ID of the + * OBS bucket in which it is located. 
+ * + * @param keyInfo + * @throws IOException + */ + private void setKeyParentID(OmKeyInfo keyInfo) + throws IOException { + String bucketKey = getReconOMMetadataManager() + .getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + OmBucketInfo parentBucketInfo = + getReconOMMetadataManager().getBucketTable().getSkipCache(bucketKey); + + if (parentBucketInfo != null) { + keyInfo.setParentObjectID(parentBucketInfo.getObjectID()); + } else { + throw new IOException("ParentKeyInfo for " + + "NSSummaryTaskWithOBS is null"); + } + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java new file mode 100644 index 000000000000..ac8dee5f0937 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestNSSummaryEndpointWithOBS.java @@ -0,0 +1,1127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.ozone.om.helpers.QuotaUtil.getReplicatedSize; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.StorageType; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.handlers.BucketHandler; +import org.apache.hadoop.ozone.recon.api.handlers.EntityHandler; +import 
org.apache.hadoop.ozone.recon.api.types.BucketObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.DUResponse; +import org.apache.hadoop.ozone.recon.api.types.EntityType; +import org.apache.hadoop.ozone.recon.api.types.NamespaceSummaryResponse; +import org.apache.hadoop.ozone.recon.api.types.QuotaUsageResponse; +import org.apache.hadoop.ozone.recon.api.types.ResponseStatus; +import org.apache.hadoop.ozone.recon.api.types.VolumeObjectDBInfo; +import org.apache.hadoop.ozone.recon.api.types.FileSizeDistributionResponse; +import org.apache.hadoop.ozone.recon.common.CommonUtils; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.NSSummaryTaskWithOBS; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import javax.ws.rs.core.Response; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProvider; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Test for NSSummary REST APIs with OBS. + * Testing is done on a simple object store model with a flat hierarchy: + * Testing the following case. + * ├── vol + * │ ├── bucket1 + * │ │ ├── file1 + * │ │ └── file2 + * │ │ └── file3 + * │ └── bucket2 + * │ ├── file4 + * │ └── file5 + * └── vol2 + * ├── bucket3 + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 + * └── file11 + * This tests the Rest APIs for NSSummary in the context of OBS buckets, + * focusing on disk usage, quota usage, and file size distribution. 
+ */ +public class TestNSSummaryEndpointWithOBS { + @TempDir + private Path temporaryFolder; + + private ReconOMMetadataManager reconOMMetadataManager; + private NSSummaryEndpoint nsSummaryEndpoint; + private OzoneConfiguration conf; + private CommonUtils commonUtils; + + private static final String TEST_PATH_UTILITY = + "/vol1/buck1/a/b/c/d/e/file1.txt"; + private static final String PARENT_DIR = "vol1/buck1/a/b/c/d/e"; + private static final String[] TEST_NAMES = + new String[]{"vol1", "buck1", "a", "b", "c", "d", "e", "file1.txt"}; + private static final String TEST_KEY_NAMES = "a/b/c/d/e/file1.txt"; + + // Object names + private static final String VOL = "vol"; + private static final String VOL_TWO = "vol2"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String BUCKET_THREE = "bucket3"; + private static final String BUCKET_FOUR = "bucket4"; + private static final String KEY_ONE = "file1"; + private static final String KEY_TWO = "file2"; + private static final String KEY_THREE = "file3"; + private static final String KEY_FOUR = "file4"; + private static final String KEY_FIVE = "file5"; + private static final String KEY_EIGHT = "file8"; + private static final String KEY_NINE = "file9"; + private static final String KEY_TEN = "file10"; + private static final String KEY_ELEVEN = "file11"; + private static final String MULTI_BLOCK_FILE = KEY_THREE; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long VOL_TWO_OBJECT_ID = 14L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long BUCKET_THREE_OBJECT_ID = 15L; + private static final long BUCKET_FOUR_OBJECT_ID = 16L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_EIGHT_OBJECT_ID = 17L; + private static final long KEY_NINE_OBJECT_ID = 19L; + private static final long KEY_TEN_OBJECT_ID = 20L; + private static final long KEY_ELEVEN_OBJECT_ID = 21L; + private static final long MULTI_BLOCK_KEY_OBJECT_ID = 13L; + + // container IDs + private static final long CONTAINER_ONE_ID = 1L; + private static final long CONTAINER_TWO_ID = 2L; + private static final long CONTAINER_THREE_ID = 3L; + private static final long CONTAINER_FOUR_ID = 4L; + private static final long CONTAINER_FIVE_ID = 5L; + private static final long CONTAINER_SIX_ID = 6L; + + // replication factors + private static final int CONTAINER_ONE_REPLICA_COUNT = 3; + private static final int CONTAINER_TWO_REPLICA_COUNT = 2; + private static final int CONTAINER_THREE_REPLICA_COUNT = 4; + private static final int CONTAINER_FOUR_REPLICA_COUNT = 5; + private static final int CONTAINER_FIVE_REPLICA_COUNT = 2; + private static final int CONTAINER_SIX_REPLICA_COUNT = 3; + + // block lengths + private static final long BLOCK_ONE_LENGTH = 1000L; + private static final long BLOCK_TWO_LENGTH = 2000L; + private static final long BLOCK_THREE_LENGTH = 3000L; + private static final long BLOCK_FOUR_LENGTH = 4000L; + private static final long BLOCK_FIVE_LENGTH = 5000L; + private static final long BLOCK_SIX_LENGTH = 6000L; + + // data size in bytes + private static final long FILE_ONE_SIZE = 500L; // bin 0 + private static 
final long FILE_TWO_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_THREE_SIZE = 4 * OzoneConsts.KB + 1; // bin 3 + private static final long FILE_FOUR_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_FIVE_SIZE = 100L; // bin 0 + private static final long FILE_EIGHT_SIZE = OzoneConsts.KB + 1; // bin 1 + private static final long FILE_NINE_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_TEN_SIZE = 2 * OzoneConsts.KB + 1; // bin 2 + private static final long FILE_ELEVEN_SIZE = OzoneConsts.KB + 1; // bin 1 + + private static final long FILE1_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_ONE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE2_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TWO_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE3_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_THREE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE4_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_FOUR_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE5_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_FIVE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long FILE8_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_EIGHT_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE9_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_NINE_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE10_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_TEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + private static final long FILE11_SIZE_WITH_REPLICA = + getReplicatedSize(FILE_ELEVEN_SIZE, + StandaloneReplicationConfig.getInstance(ONE)); + + private static final long MULTI_BLOCK_KEY_SIZE_WITH_REPLICA + = FILE3_SIZE_WITH_REPLICA; + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA + + FILE8_SIZE_WITH_REPLICA + + FILE9_SIZE_WITH_REPLICA + + FILE10_SIZE_WITH_REPLICA + + FILE11_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA + + FILE4_SIZE_WITH_REPLICA + + FILE5_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1 + = FILE1_SIZE_WITH_REPLICA + + FILE2_SIZE_WITH_REPLICA + + FILE3_SIZE_WITH_REPLICA; + + private static final long + MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY + = FILE4_SIZE_WITH_REPLICA; + + // quota in bytes + private static final long ROOT_QUOTA = 2 * (2 * OzoneConsts.MB); + private static final long VOL_QUOTA = 2 * OzoneConsts.MB; + private static final long VOL_TWO_QUOTA = 2 * OzoneConsts.MB; + private static final long BUCKET_ONE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_TWO_QUOTA = OzoneConsts.MB; + private static final long BUCKET_THREE_QUOTA = OzoneConsts.MB; + private static final long BUCKET_FOUR_QUOTA = OzoneConsts.MB; + + // mock client's path requests + private static final String TEST_USER = "TestUser"; + private static final String ROOT_PATH = "/"; + private static final String VOL_PATH = ROOT_PATH + VOL; + private static final String VOL_TWO_PATH = ROOT_PATH + VOL_TWO; + private static final String BUCKET_ONE_PATH = + ROOT_PATH + VOL + 
ROOT_PATH + BUCKET_ONE; + private static final String BUCKET_TWO_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO; + private static final String KEY_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_TWO + ROOT_PATH + KEY_FOUR; + private static final String MULTI_BLOCK_KEY_PATH = + ROOT_PATH + VOL + ROOT_PATH + BUCKET_ONE + ROOT_PATH + KEY_THREE; + private static final String INVALID_PATH = "/vol/path/not/found"; + + // some expected answers + private static final long ROOT_DATA_SIZE = + FILE_ONE_SIZE + FILE_TWO_SIZE + FILE_THREE_SIZE + FILE_FOUR_SIZE + + FILE_FIVE_SIZE + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + + FILE_ELEVEN_SIZE; + private static final long VOL_DATA_SIZE = FILE_ONE_SIZE + FILE_TWO_SIZE + + FILE_THREE_SIZE + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + + private static final long VOL_TWO_DATA_SIZE = + FILE_EIGHT_SIZE + FILE_NINE_SIZE + FILE_TEN_SIZE + FILE_ELEVEN_SIZE; + + private static final long BUCKET_ONE_DATA_SIZE = FILE_ONE_SIZE + + FILE_TWO_SIZE + + FILE_THREE_SIZE; + + private static final long BUCKET_TWO_DATA_SIZE = + FILE_FOUR_SIZE + FILE_FIVE_SIZE; + + + @BeforeEach + public void setUp() throws Exception { + conf = new OzoneConfiguration(); + OMMetadataManager omMetadataManager = initializeNewOmMetadataManager( + Files.createDirectory(temporaryFolder.resolve( + "JunitOmDBDir")).toFile(), conf); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProvider(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + Files.createDirectory(temporaryFolder.resolve( + "omMetadatDir")).toFile()); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder.toFile()) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .addBinding(OzoneStorageContainerManager.class, + getMockReconSCM()) + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(NSSummaryEndpoint.class) + .build(); + ReconNamespaceSummaryManager reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + nsSummaryEndpoint = reconTestInjector.getInstance(NSSummaryEndpoint.class); + + // populate OM DB and reprocess into Recon RocksDB + populateOMDB(); + NSSummaryTaskWithOBS nsSummaryTaskWithOBS = + new NSSummaryTaskWithOBS(reconNamespaceSummaryManager, + reconOMMetadataManager, conf); + nsSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + commonUtils = new CommonUtils(); + } + + @Test + public void testUtility() { + String[] names = EntityHandler.parseRequestPath(TEST_PATH_UTILITY); + assertArrayEquals(TEST_NAMES, names); + String keyName = BucketHandler.getKeyName(names); + assertEquals(TEST_KEY_NAMES, keyName); + String subpath = BucketHandler.buildSubpath(PARENT_DIR, "file1.txt"); + assertEquals(TEST_PATH_UTILITY, subpath); + } + + @Test + public void testGetBasicInfoRoot() throws Exception { + // Test root basics + Response rootResponse = nsSummaryEndpoint.getBasicInfo(ROOT_PATH); + NamespaceSummaryResponse rootResponseObj = + (NamespaceSummaryResponse) rootResponse.getEntity(); + assertEquals(EntityType.ROOT, rootResponseObj.getEntityType()); + assertEquals(2, rootResponseObj.getCountStats().getNumVolume()); + assertEquals(4, rootResponseObj.getCountStats().getNumBucket()); + assertEquals(9, rootResponseObj.getCountStats().getNumTotalKey()); + } + + @Test + public void testGetBasicInfoVol() throws Exception { + // Test 
volume basics + Response volResponse = nsSummaryEndpoint.getBasicInfo(VOL_PATH); + NamespaceSummaryResponse volResponseObj = + (NamespaceSummaryResponse) volResponse.getEntity(); + assertEquals(EntityType.VOLUME, + volResponseObj.getEntityType()); + assertEquals(2, volResponseObj.getCountStats().getNumBucket()); + assertEquals(5, volResponseObj.getCountStats().getNumTotalKey()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. + getObjectDBInfo()).getAdmin()); + assertEquals(TEST_USER, ((VolumeObjectDBInfo) volResponseObj. + getObjectDBInfo()).getOwner()); + assertEquals(VOL, volResponseObj.getObjectDBInfo().getName()); + assertEquals(2097152, volResponseObj.getObjectDBInfo().getQuotaInBytes()); + assertEquals(-1, volResponseObj.getObjectDBInfo().getQuotaInNamespace()); + } + + @Test + public void testGetBasicInfoBucketOne() throws Exception { + // Test bucket 1's basics + Response bucketOneResponse = + nsSummaryEndpoint.getBasicInfo(BUCKET_ONE_PATH); + NamespaceSummaryResponse bucketOneObj = + (NamespaceSummaryResponse) bucketOneResponse.getEntity(); + assertEquals(EntityType.BUCKET, bucketOneObj.getEntityType()); + assertEquals(3, bucketOneObj.getCountStats().getNumTotalKey()); + assertEquals(VOL, + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getVolumeName()); + assertEquals(StorageType.DISK, + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getStorageType()); + assertEquals(getBucketLayout(), + ((BucketObjectDBInfo) + bucketOneObj.getObjectDBInfo()).getBucketLayout()); + assertEquals(BUCKET_ONE, + ((BucketObjectDBInfo) bucketOneObj.getObjectDBInfo()).getName()); + } + + @Test + public void testGetBasicInfoBucketTwo() throws Exception { + // Test bucket 2's basics + commonUtils.testNSSummaryBasicInfoBucketTwo( + BucketLayout.OBJECT_STORE, + nsSummaryEndpoint); + } + + @Test + public void testGetBasicInfoNoPath() throws Exception { + // Test invalid path + commonUtils.testNSSummaryBasicInfoNoPath(nsSummaryEndpoint); + } + + @Test + public void testGetBasicInfoKey() throws Exception { + // Test key + commonUtils.testNSSummaryBasicInfoKey(nsSummaryEndpoint); + } + + @Test + public void testDiskUsageRoot() throws Exception { + // root level DU + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, false); + DUResponse duRootRes = (DUResponse) rootResponse.getEntity(); + assertEquals(2, duRootRes.getCount()); + List duRootData = duRootRes.getDuData(); + // sort based on subpath + Collections.sort(duRootData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duVol1 = duRootData.get(0); + DUResponse.DiskUsage duVol2 = duRootData.get(1); + assertEquals(VOL_PATH, duVol1.getSubpath()); + assertEquals(VOL_TWO_PATH, duVol2.getSubpath()); + assertEquals(VOL_DATA_SIZE, duVol1.getSize()); + assertEquals(VOL_TWO_DATA_SIZE, duVol2.getSize()); + } + + @Test + public void testDiskUsageVolume() throws Exception { + // volume level DU + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, false); + DUResponse duVolRes = (DUResponse) volResponse.getEntity(); + assertEquals(2, duVolRes.getCount()); + List duData = duVolRes.getDuData(); + // sort based on subpath + Collections.sort(duData, + Comparator.comparing(DUResponse.DiskUsage::getSubpath)); + DUResponse.DiskUsage duBucket1 = duData.get(0); + DUResponse.DiskUsage duBucket2 = duData.get(1); + assertEquals(BUCKET_ONE_PATH, duBucket1.getSubpath()); + assertEquals(BUCKET_TWO_PATH, duBucket2.getSubpath()); + assertEquals(BUCKET_ONE_DATA_SIZE, 
duBucket1.getSize()); + assertEquals(BUCKET_TWO_DATA_SIZE, duBucket2.getSize()); + } + + @Test + public void testDiskUsageBucket() throws Exception { + // bucket level DU + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, false); + DUResponse duBucketResponse = (DUResponse) bucketResponse.getEntity(); + // There are no sub-paths under this OBS bucket. + assertEquals(0, duBucketResponse.getCount()); + assertEquals(BUCKET_ONE_DATA_SIZE, duBucketResponse.getSize()); + } + + @Test + public void testDiskUsageKey() throws Exception { + // key level DU + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + false, false); + DUResponse keyObj = (DUResponse) keyResponse.getEntity(); + assertEquals(0, keyObj.getCount()); + assertEquals(FILE_FOUR_SIZE, keyObj.getSize()); + } + + @Test + public void testDiskUsageUnknown() throws Exception { + // invalid path check + Response invalidResponse = nsSummaryEndpoint.getDiskUsage(INVALID_PATH, + false, false); + DUResponse invalidObj = (DUResponse) invalidResponse.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidObj.getStatus()); + } + + @Test + public void testDiskUsageWithReplication() throws Exception { + setUpMultiBlockKey(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(MULTI_BLOCK_KEY_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_KEY_SIZE_WITH_REPLICA, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderRootWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + // withReplica is true + Response rootResponse = nsSummaryEndpoint.getDiskUsage(ROOT_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) rootResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_ROOT, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + + } + + @Test + public void testDataSizeUnderVolWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response volResponse = nsSummaryEndpoint.getDiskUsage(VOL_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) volResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_VOL, + replicaDUResponse.getSizeWithReplica()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getDuData().get(0).getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderBucketWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response bucketResponse = nsSummaryEndpoint.getDiskUsage(BUCKET_ONE_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) bucketResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_BUCKET1, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testDataSizeUnderKeyWithReplication() throws IOException { + setUpMultiBlockReplicatedKeys(); + Response keyResponse = nsSummaryEndpoint.getDiskUsage(KEY_PATH, + false, true); + DUResponse replicaDUResponse = (DUResponse) keyResponse.getEntity(); + assertEquals(ResponseStatus.OK, replicaDUResponse.getStatus()); + 
assertEquals(MULTI_BLOCK_TOTAL_SIZE_WITH_REPLICA_UNDER_KEY, + replicaDUResponse.getSizeWithReplica()); + } + + @Test + public void testQuotaUsage() throws Exception { + // root level quota usage + Response rootResponse = nsSummaryEndpoint.getQuotaUsage(ROOT_PATH); + QuotaUsageResponse quRootRes = + (QuotaUsageResponse) rootResponse.getEntity(); + assertEquals(ROOT_QUOTA, quRootRes.getQuota()); + assertEquals(ROOT_DATA_SIZE, quRootRes.getQuotaUsed()); + + // volume level quota usage + Response volResponse = nsSummaryEndpoint.getQuotaUsage(VOL_PATH); + QuotaUsageResponse quVolRes = (QuotaUsageResponse) volResponse.getEntity(); + assertEquals(VOL_QUOTA, quVolRes.getQuota()); + assertEquals(VOL_DATA_SIZE, quVolRes.getQuotaUsed()); + + // bucket level quota usage + Response bucketRes = nsSummaryEndpoint.getQuotaUsage(BUCKET_ONE_PATH); + QuotaUsageResponse quBucketRes = (QuotaUsageResponse) bucketRes.getEntity(); + assertEquals(BUCKET_ONE_QUOTA, quBucketRes.getQuota()); + assertEquals(BUCKET_ONE_DATA_SIZE, quBucketRes.getQuotaUsed()); + + Response bucketRes2 = nsSummaryEndpoint.getQuotaUsage(BUCKET_TWO_PATH); + QuotaUsageResponse quBucketRes2 = + (QuotaUsageResponse) bucketRes2.getEntity(); + assertEquals(BUCKET_TWO_QUOTA, quBucketRes2.getQuota()); + assertEquals(BUCKET_TWO_DATA_SIZE, quBucketRes2.getQuotaUsed()); + + // other level not applicable + Response naResponse2 = nsSummaryEndpoint.getQuotaUsage(KEY_PATH); + QuotaUsageResponse quotaUsageResponse2 = + (QuotaUsageResponse) naResponse2.getEntity(); + assertEquals(ResponseStatus.TYPE_NOT_APPLICABLE, + quotaUsageResponse2.getResponseCode()); + + // invalid path request + Response invalidRes = nsSummaryEndpoint.getQuotaUsage(INVALID_PATH); + QuotaUsageResponse invalidResObj = + (QuotaUsageResponse) invalidRes.getEntity(); + assertEquals(ResponseStatus.PATH_NOT_FOUND, + invalidResObj.getResponseCode()); + } + + + @Test + public void testFileSizeDist() throws Exception { + checkFileSizeDist(ROOT_PATH, 2, 3, 3, 1); + checkFileSizeDist(VOL_PATH, 2, 1, 1, 1); + checkFileSizeDist(BUCKET_ONE_PATH, 1, 1, 0, 1); + } + + public void checkFileSizeDist(String path, int bin0, + int bin1, int bin2, int bin3) throws Exception { + Response res = nsSummaryEndpoint.getFileSizeDistribution(path); + FileSizeDistributionResponse fileSizeDistResObj = + (FileSizeDistributionResponse) res.getEntity(); + int[] fileSizeDist = fileSizeDistResObj.getFileSizeDist(); + assertEquals(bin0, fileSizeDist[0]); + assertEquals(bin1, fileSizeDist[1]); + assertEquals(bin2, fileSizeDist[2]); + assertEquals(bin3, fileSizeDist[3]); + for (int i = 4; i < ReconConstants.NUM_OF_FILE_SIZE_BINS; ++i) { + assertEquals(0, fileSizeDist[i]); + } + } + + /** + * Testing the following case. + * ├── vol + * │ ├── bucket1 + * │ │ ├── file1 + * │ │ └── file2 + * │ │ └── file3 + * │ └── bucket2 + * │ ├── file4 + * │ └── file5 + * └── vol2 + * ├── bucket3 + * │ ├── file8 + * │ ├── file9 + * │ └── file10 + * └── bucket4 + * └── file11 + * + * Write these keys to OM and + * replicate them. 
+ */ + @SuppressWarnings("checkstyle:MethodLength") + private void populateOMDB() throws Exception { + + // write all keys + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_TWO_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + FILE_THREE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + FILE_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + FILE_FIVE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + KEY_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_EIGHT_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + KEY_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_NINE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + KEY_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_TEN_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + KEY_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + FILE_ELEVEN_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, two volumes, and + * four buckets.
+ * + * @throws IOException ioEx + */ + private static OMMetadataManager initializeNewOmMetadataManager( + File omDbDir, OzoneConfiguration omConfiguration) + throws IOException { + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + OMMetadataManager omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_QUOTA) + .build(); + + String volume2Key = omMetadataManager.getVolumeKey(VOL_TWO); + OmVolumeArgs args2 = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_TWO_OBJECT_ID) + .setVolume(VOL_TWO) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .setQuotaInBytes(VOL_TWO_QUOTA) + .build(); + + omMetadataManager.getVolumeTable().put(volumeKey, args); + omMetadataManager.getVolumeTable().put(volume2Key, args2); + + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setQuotaInBytes(BUCKET_ONE_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setQuotaInBytes(BUCKET_TWO_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo3 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_THREE) + .setObjectID(BUCKET_THREE_OBJECT_ID) + .setQuotaInBytes(BUCKET_THREE_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo4 = OmBucketInfo.newBuilder() + .setVolumeName(VOL_TWO) + .setBucketName(BUCKET_FOUR) + .setObjectID(BUCKET_FOUR_OBJECT_ID) + .setQuotaInBytes(BUCKET_FOUR_QUOTA) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo.getVolumeName(), bucketInfo.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + String bucketKey3 = omMetadataManager.getBucketKey( + bucketInfo3.getVolumeName(), bucketInfo3.getBucketName()); + String bucketKey4 = omMetadataManager.getBucketKey( + bucketInfo4.getVolumeName(), bucketInfo4.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + omMetadataManager.getBucketTable().put(bucketKey3, bucketInfo3); + omMetadataManager.getBucketTable().put(bucketKey4, bucketInfo4); + + return omMetadataManager; + } + + private void setUpMultiBlockKey() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup = + getLocationInfoGroup1(); + + // add the multi-block key to Recon's OM + writeKeyToOm(reconOMMetadataManager, + MULTI_BLOCK_FILE, + BUCKET_ONE, + VOL, + MULTI_BLOCK_FILE, + MULTI_BLOCK_KEY_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup), + getBucketLayout(), + FILE_THREE_SIZE); + } + + private OmKeyLocationInfoGroup getLocationInfoGroup1() { + List locationInfoList = new ArrayList<>(); + BlockID block1 = new BlockID(CONTAINER_ONE_ID, 0L); + BlockID block2 = new BlockID(CONTAINER_TWO_ID, 0L); + BlockID block3 = new BlockID(CONTAINER_THREE_ID, 0L); + + OmKeyLocationInfo location1 = new 
OmKeyLocationInfo.Builder() + .setBlockID(block1) + .setLength(BLOCK_ONE_LENGTH) + .build(); + OmKeyLocationInfo location2 = new OmKeyLocationInfo.Builder() + .setBlockID(block2) + .setLength(BLOCK_TWO_LENGTH) + .build(); + OmKeyLocationInfo location3 = new OmKeyLocationInfo.Builder() + .setBlockID(block3) + .setLength(BLOCK_THREE_LENGTH) + .build(); + locationInfoList.add(location1); + locationInfoList.add(location2); + locationInfoList.add(location3); + + return new OmKeyLocationInfoGroup(0L, locationInfoList); + } + + + private OmKeyLocationInfoGroup getLocationInfoGroup2() { + List locationInfoList = new ArrayList<>(); + BlockID block4 = new BlockID(CONTAINER_FOUR_ID, 0L); + BlockID block5 = new BlockID(CONTAINER_FIVE_ID, 0L); + BlockID block6 = new BlockID(CONTAINER_SIX_ID, 0L); + + OmKeyLocationInfo location4 = new OmKeyLocationInfo.Builder() + .setBlockID(block4) + .setLength(BLOCK_FOUR_LENGTH) + .build(); + OmKeyLocationInfo location5 = new OmKeyLocationInfo.Builder() + .setBlockID(block5) + .setLength(BLOCK_FIVE_LENGTH) + .build(); + OmKeyLocationInfo location6 = new OmKeyLocationInfo.Builder() + .setBlockID(block6) + .setLength(BLOCK_SIX_LENGTH) + .build(); + locationInfoList.add(location4); + locationInfoList.add(location5); + locationInfoList.add(location6); + return new OmKeyLocationInfoGroup(0L, locationInfoList); + + } + + @SuppressWarnings("checkstyle:MethodLength") + private void setUpMultiBlockReplicatedKeys() throws IOException { + OmKeyLocationInfoGroup locationInfoGroup1 = + getLocationInfoGroup1(); + OmKeyLocationInfoGroup locationInfoGroup2 = + getLocationInfoGroup2(); + + //vol/bucket1/file1 + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + FILE_ONE_SIZE); + + //vol/bucket1/file2 + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + FILE_TWO_SIZE); + + //vol/bucket1/file3 + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + FILE_THREE_SIZE); + + //vol/bucket2/file4 + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + FILE_FOUR_SIZE); + + //vol/bucket2/file5 + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + FILE_FIVE_SIZE); + + //vol2/bucket3/file8 + writeKeyToOm(reconOMMetadataManager, + KEY_EIGHT, + BUCKET_THREE, + VOL_TWO, + KEY_EIGHT, + KEY_EIGHT_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + FILE_EIGHT_SIZE); + + //vol2/bucket3/file9 + writeKeyToOm(reconOMMetadataManager, + KEY_NINE, + BUCKET_THREE, + VOL_TWO, + KEY_NINE, + KEY_NINE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + 
Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + FILE_NINE_SIZE); + + //vol2/bucket3/file10 + writeKeyToOm(reconOMMetadataManager, + KEY_TEN, + BUCKET_THREE, + VOL_TWO, + KEY_TEN, + KEY_TEN_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + BUCKET_THREE_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup2), + getBucketLayout(), + FILE_TEN_SIZE); + + //vol2/bucket4/file11 + writeKeyToOm(reconOMMetadataManager, + KEY_ELEVEN, + BUCKET_FOUR, + VOL_TWO, + KEY_ELEVEN, + KEY_ELEVEN_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + BUCKET_FOUR_OBJECT_ID, + VOL_TWO_OBJECT_ID, + Collections.singletonList(locationInfoGroup1), + getBucketLayout(), + FILE_ELEVEN_SIZE); + } + + /** + * Generate a set of mock container replica with a size of + * replication factor for container. + * + * @param replicationFactor number of replica + * @param containerID the container replicated based upon + * @return a set of container replica for testing + */ + private static Set generateMockContainerReplicas( + int replicationFactor, ContainerID containerID) { + Set result = new HashSet<>(); + for (int i = 0; i < replicationFactor; ++i) { + DatanodeDetails randomDatanode = randomDatanodeDetails(); + ContainerReplica replica = new ContainerReplica.ContainerReplicaBuilder() + .setContainerID(containerID) + .setContainerState( + StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN) + .setDatanodeDetails(randomDatanode) + .build(); + result.add(replica); + } + return result; + } + + private static ReconStorageContainerManagerFacade getMockReconSCM() + throws ContainerNotFoundException { + ReconStorageContainerManagerFacade reconSCM = + mock(ReconStorageContainerManagerFacade.class); + ContainerManager containerManager = mock(ContainerManager.class); + + // Container 1 is 3-way replicated + ContainerID containerID1 = new ContainerID(CONTAINER_ONE_ID); + Set containerReplicas1 = generateMockContainerReplicas( + CONTAINER_ONE_REPLICA_COUNT, containerID1); + when(containerManager.getContainerReplicas(containerID1)) + .thenReturn(containerReplicas1); + + // Container 2 is under replicated with 2 replica + ContainerID containerID2 = new ContainerID(CONTAINER_TWO_ID); + Set containerReplicas2 = generateMockContainerReplicas( + CONTAINER_TWO_REPLICA_COUNT, containerID2); + when(containerManager.getContainerReplicas(containerID2)) + .thenReturn(containerReplicas2); + + // Container 3 is over replicated with 4 replica + ContainerID containerID3 = new ContainerID(CONTAINER_THREE_ID); + Set containerReplicas3 = generateMockContainerReplicas( + CONTAINER_THREE_REPLICA_COUNT, containerID3); + when(containerManager.getContainerReplicas(containerID3)) + .thenReturn(containerReplicas3); + + // Container 4 is replicated with 5 replica + ContainerID containerID4 = new ContainerID(CONTAINER_FOUR_ID); + Set containerReplicas4 = generateMockContainerReplicas( + CONTAINER_FOUR_REPLICA_COUNT, containerID4); + when(containerManager.getContainerReplicas(containerID4)) + .thenReturn(containerReplicas4); + + // Container 5 is replicated with 2 replica + ContainerID containerID5 = new ContainerID(CONTAINER_FIVE_ID); + Set containerReplicas5 = generateMockContainerReplicas( + CONTAINER_FIVE_REPLICA_COUNT, containerID5); + when(containerManager.getContainerReplicas(containerID5)) + .thenReturn(containerReplicas5); + + // Container 6 is replicated with 3 replica + ContainerID containerID6 = new ContainerID(CONTAINER_SIX_ID); + Set containerReplicas6 = generateMockContainerReplicas( + CONTAINER_SIX_REPLICA_COUNT, 
containerID6); + when(containerManager.getContainerReplicas(containerID6)) + .thenReturn(containerReplicas6); + + when(reconSCM.getContainerManager()).thenReturn(containerManager); + ReconNodeManager mockReconNodeManager = mock(ReconNodeManager.class); + when(mockReconNodeManager.getStats()).thenReturn(getMockSCMRootStat()); + when(reconSCM.getScmNodeManager()).thenReturn(mockReconNodeManager); + return reconSCM; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } + + private static SCMNodeStat getMockSCMRootStat() { + return new SCMNodeStat(ROOT_QUOTA, ROOT_DATA_SIZE, + ROOT_QUOTA - ROOT_DATA_SIZE, 0L, 0L); + } + +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java index 6992c3100fb9..485804240d52 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java @@ -166,7 +166,7 @@ public void setUp() throws Exception { reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); assertNotNull(nsSummaryForBucket1); assertNotNull(nsSummaryForBucket2); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } @Test @@ -233,7 +233,7 @@ public void setUp() throws IOException { assertNotNull(nsSummaryForBucket2); nsSummaryForBucket3 = reconNamespaceSummaryManager.getNSSummary(BUCKET_THREE_OBJECT_ID); - assertNull(nsSummaryForBucket3); + assertNotNull(nsSummaryForBucket3); } private OMUpdateEventBatch processEventBatch() throws IOException { diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java new file mode 100644 index 000000000000..8f9d6b2990a5 --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -0,0 +1,548 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.utils.db.RDBBatchOperation; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.recon.ReconConstants; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.NSSummary; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.spi.ReconNamespaceSummaryManager; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; + +import java.io.File; +import java.io.IOException; +import java.io.Serializable; +import java.util.Arrays; +import java.util.Set; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeKeyToOm; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getMockOzoneManagerServiceProviderWithFSO; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +/** + * Unit test for NSSummaryTaskWithOBS. 
+ */ +public final class TestNSSummaryTaskWithOBS implements Serializable { + private static ReconNamespaceSummaryManager reconNamespaceSummaryManager; + private static OMMetadataManager omMetadataManager; + private static ReconOMMetadataManager reconOMMetadataManager; + private static NSSummaryTaskWithOBS nSSummaryTaskWithOBS; + private static OzoneConfiguration omConfiguration; + + // Object names + private static final String VOL = "vol"; + private static final String BUCKET_ONE = "bucket1"; + private static final String BUCKET_TWO = "bucket2"; + private static final String KEY_ONE = "key1"; + private static final String KEY_TWO = "key2"; + private static final String KEY_THREE = "dir1/dir2/key3"; + private static final String KEY_FOUR = "key4///////////"; + private static final String KEY_FIVE = "//////////"; + private static final String KEY_SIX = "key6"; + private static final String KEY_SEVEN = "key7"; + + private static final String TEST_USER = "TestUser"; + + private static final long PARENT_OBJECT_ID_ZERO = 0L; + private static final long VOL_OBJECT_ID = 0L; + private static final long BUCKET_ONE_OBJECT_ID = 1L; + private static final long BUCKET_TWO_OBJECT_ID = 2L; + private static final long KEY_ONE_OBJECT_ID = 3L; + private static final long KEY_TWO_OBJECT_ID = 5L; + private static final long KEY_FOUR_OBJECT_ID = 6L; + private static final long KEY_THREE_OBJECT_ID = 8L; + private static final long KEY_FIVE_OBJECT_ID = 9L; + private static final long KEY_SIX_OBJECT_ID = 10L; + private static final long KEY_SEVEN_OBJECT_ID = 11L; + + + private static final long KEY_ONE_SIZE = 500L; + private static final long KEY_TWO_OLD_SIZE = 1025L; + private static final long KEY_TWO_UPDATE_SIZE = 1023L; + private static final long KEY_THREE_SIZE = + ReconConstants.MAX_FILE_SIZE_UPPER_BOUND - 100L; + private static final long KEY_FOUR_SIZE = 2050L; + private static final long KEY_FIVE_SIZE = 100L; + private static final long KEY_SIX_SIZE = 6000L; + private static final long KEY_SEVEN_SIZE = 7000L; + + private TestNSSummaryTaskWithOBS() { + } + + @BeforeAll + public static void setUp(@TempDir File tmpDir) throws Exception { + initializeNewOmMetadataManager(new File(tmpDir, "om")); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + getMockOzoneManagerServiceProviderWithFSO(); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + new File(tmpDir, "recon")); + + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(tmpDir) + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(ozoneManagerServiceProvider) + .withReconSqlDb() + .withContainerDB() + .build(); + reconNamespaceSummaryManager = + reconTestInjector.getInstance(ReconNamespaceSummaryManager.class); + + NSSummary nonExistentSummary = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNull(nonExistentSummary); + + populateOMDB(); + + nSSummaryTaskWithOBS = new NSSummaryTaskWithOBS( + reconNamespaceSummaryManager, + reconOMMetadataManager, omConfiguration); + } + + /** + * Nested class for testing NSSummaryTaskWithOBS reprocess. + */ + @Nested + public class TestReprocess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + @BeforeEach + public void setUp() throws IOException { + // write a NSSummary prior to reprocess + // verify it got cleaned up after. 
+ NSSummary staleNSSummary = new NSSummary(); + RDBBatchOperation rdbBatchOperation = new RDBBatchOperation(); + reconNamespaceSummaryManager.batchStoreNSSummaries(rdbBatchOperation, -1L, + staleNSSummary); + reconNamespaceSummaryManager.commitBatchOperation(rdbBatchOperation); + + // Verify commit + assertNotNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + assertNotNull(nsSummaryForBucket2); + } + + @Test + public void testReprocessNSSummaryNull() throws IOException { + assertNull(reconNamespaceSummaryManager.getNSSummary(-1L)); + } + + @Test + public void testReprocessGetFiles() { + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertEquals(2, nsSummaryForBucket2.getNumOfFiles()); + + assertEquals(KEY_ONE_SIZE + KEY_TWO_OLD_SIZE + KEY_THREE_SIZE, + nsSummaryForBucket1.getSizeOfFiles()); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + @Test + public void testReprocessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {0, 1, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Nested class for testing NSSummaryTaskWithOBS process. + */ + @Nested + public class TestProcess { + + private NSSummary nsSummaryForBucket1; + private NSSummary nsSummaryForBucket2; + + private OMDBUpdateEvent keyEvent1; + private OMDBUpdateEvent keyEvent2; + private OMDBUpdateEvent keyEvent3; + private OMDBUpdateEvent keyEvent4; + + @BeforeEach + public void setUp() throws IOException { + // reinit Recon RocksDB's namespace CF. + reconNamespaceSummaryManager.clearNSSummaryTable(); + nSSummaryTaskWithOBS.reprocessWithOBS(reconOMMetadataManager); + nSSummaryTaskWithOBS.processWithOBS(processEventBatch()); + + nsSummaryForBucket1 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_ONE_OBJECT_ID); + assertNotNull(nsSummaryForBucket1); + nsSummaryForBucket2 = + reconNamespaceSummaryManager.getNSSummary(BUCKET_TWO_OBJECT_ID); + assertNotNull(nsSummaryForBucket2); + } + + private OMUpdateEventBatch processEventBatch() throws IOException { + // Test PUT Event. + // PUT Key6 in Bucket2. 
+ String omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_TWO + + OM_KEY_PREFIX + KEY_SIX; + OmKeyInfo omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_TWO, KEY_SIX, + KEY_SIX, KEY_SIX_OBJECT_ID, BUCKET_TWO_OBJECT_ID, KEY_SIX_SIZE); + keyEvent1 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + // PUT Key7 in Bucket1. + omPutKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_SEVEN; + omPutKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_SEVEN, + KEY_SEVEN, KEY_SEVEN_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_SEVEN_SIZE); + keyEvent2 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omPutKey) + .setValue(omPutKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.PUT) + .build(); + + // Test DELETE Event. + // Delete Key1 in Bucket1. + String omDeleteKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_ONE; + OmKeyInfo omDeleteKeyInfo = buildOmKeyInfo(VOL, BUCKET_ONE, KEY_ONE, + KEY_ONE, KEY_ONE_OBJECT_ID, BUCKET_ONE_OBJECT_ID, KEY_ONE_SIZE); + keyEvent3 = new OMDBUpdateEvent. + OMUpdateEventBuilder() + .setKey(omDeleteKey) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setValue(omDeleteKeyInfo) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) + .build(); + + // Test UPDATE Event. + // Resize Key2 in Bucket1. + String omResizeKey = + OM_KEY_PREFIX + VOL + + OM_KEY_PREFIX + BUCKET_ONE + + OM_KEY_PREFIX + KEY_TWO; + OmKeyInfo oldOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE); + OmKeyInfo newOmResizeKeyInfo = + buildOmKeyInfo(VOL, BUCKET_ONE, KEY_TWO, KEY_TWO, KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, KEY_TWO_OLD_SIZE + 100); + keyEvent4 = new OMDBUpdateEvent. 
+ OMUpdateEventBuilder() + .setKey(omResizeKey) + .setOldValue(oldOmResizeKeyInfo) + .setValue(newOmResizeKeyInfo) + .setTable(omMetadataManager.getKeyTable(getBucketLayout()) + .getName()) + .setAction(OMDBUpdateEvent.OMDBUpdateAction.UPDATE) + .build(); + + return new OMUpdateEventBatch( + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + } + + @Test + public void testProcessForCount() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals(3, nsSummaryForBucket1.getNumOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(3, nsSummaryForBucket2.getNumOfFiles()); + + Set childDirBucket1 = nsSummaryForBucket1.getChildDir(); + assertEquals(0, childDirBucket1.size()); + Set childDirBucket2 = nsSummaryForBucket2.getChildDir(); + assertEquals(0, childDirBucket2.size()); + } + + @Test + public void testProcessForSize() throws IOException { + assertNotNull(nsSummaryForBucket1); + assertEquals( + KEY_THREE_SIZE + KEY_SEVEN_SIZE + KEY_TWO_OLD_SIZE + 100, + nsSummaryForBucket1.getSizeOfFiles()); + assertNotNull(nsSummaryForBucket2); + assertEquals(KEY_FOUR_SIZE + KEY_FIVE_SIZE + KEY_SIX_SIZE, + nsSummaryForBucket2.getSizeOfFiles()); + } + + + @Test + public void testProcessFileBucketSize() { + int[] fileDistBucket1 = nsSummaryForBucket1.getFileSizeBucket(); + int[] fileDistBucket2 = nsSummaryForBucket2.getFileSizeBucket(); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket1.length); + assertEquals(ReconConstants.NUM_OF_FILE_SIZE_BINS, + fileDistBucket2.length); + + // Check for 1's and 0's in fileDistBucket1 + int[] expectedIndexes1 = {1, 3, 40}; + for (int index = 0; index < fileDistBucket1.length; index++) { + if (contains(expectedIndexes1, index)) { + assertEquals(1, fileDistBucket1[index]); + } else { + assertEquals(0, fileDistBucket1[index]); + } + } + + // Check for 1's and 0's in fileDistBucket2 + int[] expectedIndexes2 = {0, 2, 3}; + for (int index = 0; index < fileDistBucket2.length; index++) { + if (contains(expectedIndexes2, index)) { + assertEquals(1, fileDistBucket2[index]); + } else { + assertEquals(0, fileDistBucket2[index]); + } + } + } + + } + + /** + * Populate OMDB with the following configs. + * vol + * / \ + * bucket1 bucket2 + * / \ \ \ \ + * key1 key2 key3 key4 key5 + * + * @throws IOException + */ + private static void populateOMDB() throws IOException { + writeKeyToOm(reconOMMetadataManager, + KEY_ONE, + BUCKET_ONE, + VOL, + KEY_ONE, + KEY_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_ONE_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_TWO, + BUCKET_ONE, + VOL, + KEY_TWO, + KEY_TWO_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_TWO_OLD_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_THREE, + BUCKET_ONE, + VOL, + KEY_THREE, + KEY_THREE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + BUCKET_ONE_OBJECT_ID, + VOL_OBJECT_ID, + KEY_THREE_SIZE, + getBucketLayout()); + + writeKeyToOm(reconOMMetadataManager, + KEY_FOUR, + BUCKET_TWO, + VOL, + KEY_FOUR, + KEY_FOUR_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FOUR_SIZE, + getBucketLayout()); + writeKeyToOm(reconOMMetadataManager, + KEY_FIVE, + BUCKET_TWO, + VOL, + KEY_FIVE, + KEY_FIVE_OBJECT_ID, + PARENT_OBJECT_ID_ZERO, + BUCKET_TWO_OBJECT_ID, + VOL_OBJECT_ID, + KEY_FIVE_SIZE, + getBucketLayout()); + } + + /** + * Create a new OM Metadata manager instance with one user, one vol, and two + * buckets. 
+ * + * @throws IOException ioEx + */ + private static void initializeNewOmMetadataManager( + File omDbDir) + throws IOException { + omConfiguration = new OzoneConfiguration(); + omConfiguration.set(OZONE_OM_DB_DIRS, + omDbDir.getAbsolutePath()); + omConfiguration.set(OMConfigKeys + .OZONE_OM_ENABLE_FILESYSTEM_PATHS, "true"); + omMetadataManager = new OmMetadataManagerImpl( + omConfiguration, null); + + String volumeKey = omMetadataManager.getVolumeKey(VOL); + OmVolumeArgs args = + OmVolumeArgs.newBuilder() + .setObjectID(VOL_OBJECT_ID) + .setVolume(VOL) + .setAdminName(TEST_USER) + .setOwnerName(TEST_USER) + .build(); + omMetadataManager.getVolumeTable().put(volumeKey, args); + + OmBucketInfo bucketInfo1 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_ONE) + .setObjectID(BUCKET_ONE_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + OmBucketInfo bucketInfo2 = OmBucketInfo.newBuilder() + .setVolumeName(VOL) + .setBucketName(BUCKET_TWO) + .setObjectID(BUCKET_TWO_OBJECT_ID) + .setBucketLayout(getBucketLayout()) + .build(); + + String bucketKey = omMetadataManager.getBucketKey( + bucketInfo1.getVolumeName(), bucketInfo1.getBucketName()); + String bucketKey2 = omMetadataManager.getBucketKey( + bucketInfo2.getVolumeName(), bucketInfo2.getBucketName()); + + omMetadataManager.getBucketTable().put(bucketKey, bucketInfo1); + omMetadataManager.getBucketTable().put(bucketKey2, bucketInfo2); + } + + /** + * Build a key info for put/update action. + * @param volume volume name + * @param bucket bucket name + * @param key key name + * @param fileName file name + * @param objectID object ID + * @param parentObjectId parent object ID + * @param dataSize file size + * @return the KeyInfo + */ + private static OmKeyInfo buildOmKeyInfo(String volume, + String bucket, + String key, + String fileName, + long objectID, + long parentObjectId, + long dataSize) { + return new OmKeyInfo.Builder() + .setBucketName(bucket) + .setVolumeName(volume) + .setKeyName(key) + .setFileName(fileName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.ONE)) + .setObjectID(objectID) + .setParentObjectID(parentObjectId) + .setDataSize(dataSize) + .build(); + } + + // Helper method to check if an array contains a specific value + private boolean contains(int[] arr, int value) { + for (int num : arr) { + if (num == value) { + return true; + } + } + return false; + } + + private static BucketLayout getBucketLayout() { + return BucketLayout.OBJECT_STORE; + } +} From 11fddc41911f1cc9ada5d6237439f069318088be Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Fri, 1 Mar 2024 11:20:25 -0800 Subject: [PATCH 086/108] HDDS-10041. Do not start the daemon inside the OzoneManagerDoubleBuffer constructor. 
(#6300) --- .../om/ratis/OzoneManagerDoubleBuffer.java | 6 ++- .../om/ratis/OzoneManagerStateMachine.java | 13 +++---- ...ManagerProtocolServerSideTranslatorPB.java | 12 +++--- .../OzoneManagerRequestHandler.java | 27 +++---------- .../ozone/protocolPB/RequestHandler.java | 39 +++++++++++-------- .../ozone/om/TestOMMultiTenantManager.java | 2 +- .../ratis/TestOzoneManagerDoubleBuffer.java | 3 +- ...eManagerDoubleBufferWithDummyResponse.java | 3 +- ...zoneManagerDoubleBufferWithOMResponse.java | 3 +- 9 files changed, 53 insertions(+), 55 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index 2c1276c43e73..d3db4120e61b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -211,11 +211,15 @@ private OzoneManagerDoubleBuffer(Builder b) { this.isTracingEnabled = b.isTracingEnabled; - isRunning.set(true); // Daemon thread which runs in background and flushes transactions to DB. daemon = new Daemon(this::flushTransactions); daemon.setName(b.threadPrefix + "OMDoubleBufferFlushThread"); + } + + public OzoneManagerDoubleBuffer start() { daemon.start(); + isRunning.set(true); + return this; } private boolean isRatisEnabled() { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 90fcba40f5d0..ff8e09435a28 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -88,7 +88,7 @@ public class OzoneManagerStateMachine extends BaseStateMachine { private final OzoneManager ozoneManager; private RequestHandler handler; private RaftGroupId raftGroupId; - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; + private volatile OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private final ExecutorService executorService; private final ExecutorService installSnapshotExecutor; private final boolean isTracingEnabled; @@ -109,9 +109,7 @@ public OzoneManagerStateMachine(OzoneManagerRatisServer ratisServer, this.threadPrefix = ozoneManager.getThreadNamePrefix(); this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); - - this.handler = new OzoneManagerRequestHandler(ozoneManager, - ozoneManagerDoubleBuffer); + this.handler = new OzoneManagerRequestHandler(ozoneManager); ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) .setNameFormat(threadPrefix + @@ -415,7 +413,6 @@ public synchronized void unpause(long newLastAppliedSnaphsotIndex, if (statePausedCount.decrementAndGet() == 0) { getLifeCycle().startAndTransition(() -> { this.ozoneManagerDoubleBuffer = buildDoubleBufferForRatis(); - handler.updateDoubleBuffer(ozoneManagerDoubleBuffer); this.setLastAppliedTermIndex(TermIndex.valueOf( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); }); @@ -434,7 +431,8 @@ public OzoneManagerDoubleBuffer buildDoubleBufferForRatis() { .setS3SecretManager(ozoneManager.getS3SecretManager()) .enableRatis(true) .enableTracing(isTracingEnabled) - .build(); + .build() + .start(); } /** @@ -524,7 +522,8 
@@ public void close() { */ private OMResponse runCommand(OMRequest request, TermIndex termIndex) { try { - OMClientResponse omClientResponse = handler.handleWriteRequest(request, termIndex); + final OMClientResponse omClientResponse = handler.handleWriteRequest( + request, termIndex, ozoneManagerDoubleBuffer); OMLockDetails omLockDetails = omClientResponse.getOmLockDetails(); OMResponse omResponse = omClientResponse.getOMResponse(); if (omLockDetails != null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index cf9bb4f0bbce..11d27913ff82 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -57,6 +57,7 @@ import com.google.protobuf.ServiceException; import org.apache.hadoop.ozone.security.S3SecurityUtil; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.ExitUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -109,8 +110,9 @@ public OzoneManagerProtocolServerSideTranslatorPB( : OzoneManagerDoubleBuffer.newBuilder() .setOmMetadataManager(ozoneManager.getMetadataManager()) .enableTracing(TracingUtil.isTracingEnabled(ozoneManager.getConfiguration())) - .build(); - this.handler = new OzoneManagerRequestHandler(impl, ozoneManagerDoubleBuffer); + .build() + .start(); + this.handler = new OzoneManagerRequestHandler(impl); this.omRatisServer = ratisServer; dispatcher = new OzoneProtocolMessageDispatcher<>("OzoneProtocol", metrics, LOG, OMPBHelper::processForDebug, OMPBHelper::processForDebug); @@ -278,7 +280,7 @@ private ServiceException createLeaderNotReadyException() { * Submits request directly to OM. */ private OMResponse submitRequestDirectlyToOM(OMRequest request) { - OMClientResponse omClientResponse; + final OMClientResponse omClientResponse; try { if (OmUtils.isReadOnly(request)) { return handler.handleReadRequest(request); @@ -286,8 +288,8 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { OMClientRequest omClientRequest = createClientRequest(request, ozoneManager); request = omClientRequest.preExecute(ozoneManager); - long index = transactionIndex.incrementAndGet(); - omClientResponse = handler.handleWriteRequest(request, TransactionInfo.getTermIndex(index)); + final TermIndex termIndex = TransactionInfo.getTermIndex(transactionIndex.incrementAndGet()); + omClientResponse = handler.handleWriteRequest(request, termIndex, ozoneManagerDoubleBuffer); } } catch (IOException ex) { // As some preExecute returns error. So handle here. 
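The hunks above change the OzoneManagerDoubleBuffer lifecycle: the flush daemon is no longer started from the constructor, so whoever builds the buffer must call start() explicitly, and the request handler no longer caches a buffer reference (updateDoubleBuffer is removed) — the buffer is passed into each write call instead. A minimal sketch of the resulting non-HA call pattern, assuming an OzoneManager instance `om`, a pre-executed OMRequest `request`, and an AtomicLong `transactionIndex` (those variable names are illustrative; the classes and methods are the ones shown in this patch):

    // Build the buffer, then start its flush daemon explicitly; start() returns `this`,
    // which is why the patched code can chain .build().start().
    OzoneManagerDoubleBuffer buffer = OzoneManagerDoubleBuffer.newBuilder()
        .setOmMetadataManager(om.getMetadataManager())
        .build()
        .start();

    // The handler is now constructed without a double-buffer reference.
    RequestHandler handler = new OzoneManagerRequestHandler(om);

    // Each write call supplies the buffer; the default handleWriteRequest adds the
    // response to the supplied buffer (except for Prepare requests) and delegates
    // request validation to handleWriteRequestImpl.
    TermIndex termIndex = TransactionInfo.getTermIndex(transactionIndex.incrementAndGet());
    OMClientResponse response = handler.handleWriteRequest(request, termIndex, buffer);

One practical effect, visible in OzoneManagerStateMachine, is that unpause() can simply build and start a fresh buffer and keep the reference itself; it no longer has to push the new instance into the handler via updateDoubleBuffer.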
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 2795f3716db8..5acb9f365107 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.Objects; import java.util.UUID; import java.util.stream.Collectors; @@ -67,7 +68,6 @@ import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; -import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -159,7 +159,6 @@ import static org.apache.hadoop.util.MetricUtil.captureLatencyNs; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; -import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.ProtobufUtils; import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; @@ -173,13 +172,10 @@ public class OzoneManagerRequestHandler implements RequestHandler { static final Logger LOG = LoggerFactory.getLogger(OzoneManagerRequestHandler.class); private final OzoneManager impl; - private OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer; private FaultInjector injector; - public OzoneManagerRequestHandler(OzoneManager om, - OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) { + public OzoneManagerRequestHandler(OzoneManager om) { this.impl = om; - this.ozoneManagerDoubleBuffer = ozoneManagerDoubleBuffer; } //TODO simplify it to make it shorter @@ -392,27 +388,14 @@ public OMResponse handleReadRequest(OMRequest request) { } @Override - public OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex) throws IOException { + public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException { injectPause(); OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, impl); return captureLatencyNs( impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(), - () -> { - OMClientResponse omClientResponse = - omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex); - Preconditions.checkNotNull(omClientResponse, - "omClientResponse returned by validateAndUpdateCache cannot be null"); - if (omRequest.getCmdType() != Type.Prepare) { - ozoneManagerDoubleBuffer.add(omClientResponse, termIndex); - } - return omClientResponse; - }); - } - - @Override - public void updateDoubleBuffer(OzoneManagerDoubleBuffer omDoubleBuffer) { - this.ozoneManagerDoubleBuffer = omDoubleBuffer; + () -> Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex), + "omClientResponse returned by validateAndUpdateCache cannot be null")); } @VisibleForTesting diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index 17e9f0a7d656..e60362a1ebb3 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -20,10 +20,9 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. - OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.ratis.server.protocol.TermIndex; import java.io.IOException; @@ -50,22 +49,30 @@ public interface RequestHandler { void validateRequest(OMRequest omRequest) throws OMException; /** - * Handle write requests. In HA this will be called from - * OzoneManagerStateMachine applyTransaction method. In non-HA this will be - * called from {@link OzoneManagerProtocolServerSideTranslatorPB} for write - * requests. + * Handle write requests. + * In HA this will be called from OzoneManagerStateMachine applyTransaction method. + * In non-HA this will be called from {@link OzoneManagerProtocolServerSideTranslatorPB}. * - * @param omRequest - * @param termIndex - ratis transaction log (term, index) + * @param omRequest the write request + * @param termIndex - ratis transaction term and index + * @param ozoneManagerDoubleBuffer for adding response * @return OMClientResponse */ - OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex) throws IOException; + default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex, + OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) throws IOException { + final OMClientResponse response = handleWriteRequestImpl(omRequest, termIndex); + if (omRequest.getCmdType() != Type.Prepare) { + ozoneManagerDoubleBuffer.add(response, termIndex); + } + return response; + } /** - * Update the OzoneManagerDoubleBuffer. This will be called when - * stateMachine is unpaused and set with new doublebuffer object. - * @param ozoneManagerDoubleBuffer + * Implementation of {@link #handleWriteRequest(OMRequest, TermIndex, OzoneManagerDoubleBuffer)}. 
+ * + * @param omRequest the write request + * @param termIndex - ratis transaction term and index + * @return OMClientResponse */ - void updateDoubleBuffer(OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer); - + OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java index 0079585a85b6..a4ced424522b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java @@ -156,7 +156,7 @@ public void testMultiTenancyRequestsWhenDisabled() throws IOException { // Check that Multi-Tenancy read requests are blocked when not enabled final OzoneManagerRequestHandler ozoneManagerRequestHandler = - new OzoneManagerRequestHandler(ozoneManager, null); + new OzoneManagerRequestHandler(ozoneManager); expectReadRequestToFail(ozoneManagerRequestHandler, OMRequestTestUtils.listUsersInTenantRequest(tenantId)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index 1890958cbaad..e4dd0ab5ada8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -137,7 +137,8 @@ public void setup() throws IOException { .setMaxUnFlushedTransactionCount(1000) .enableRatis(true) .setFlushNotifier(spyFlushNotifier) - .build(); + .build() + .start(); doNothing().when(omKeyCreateResponse).checkAndUpdateDB(any(), any()); doNothing().when(omBucketCreateResponse).checkAndUpdateDB(any(), any()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index dd8e642721e6..61be29eade6d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -79,7 +79,8 @@ public void setup() throws IOException { .setOmMetadataManager(omMetadataManager) .setMaxUnFlushedTransactionCount(10000) .enableRatis(true) - .build(); + .build() + .start(); } @AfterEach diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index d0e814a78265..54b04260d556 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -112,7 +112,8 @@ public void setup() throws IOException { .setOmMetadataManager(omMetadataManager) .setMaxUnFlushedTransactionCount(100000) .enableRatis(true) - .build(); + 
.build() + .start(); } @AfterEach From 3a872b4a108b292486dfe687fc5be3cf2fa25461 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 1 Mar 2024 21:41:33 +0100 Subject: [PATCH 087/108] HDDS-10447. Extract helper methods from Ozone native ACL unit tests (#6306) --- .../security/acl/OzoneNativeAclTestUtil.java | 170 ++++++++++++++++++ .../acl/TestOzoneNativeAuthorizer.java | 36 +--- .../ozone/security/acl/TestParentAcl.java | 69 ++----- 3 files changed, 186 insertions(+), 89 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java new file mode 100644 index 000000000000..23f21e9cdaed --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/OzoneNativeAclTestUtil.java @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.security.acl; + +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; + +import java.io.IOException; +import java.util.List; + +/** Helper for ACL tests. */ +final class OzoneNativeAclTestUtil { + + public static void addVolumeAcl( + OMMetadataManager metadataManager, + String volume, + OzoneAcl ozoneAcl + ) throws IOException { + final String volumeKey = metadataManager.getVolumeKey(volume); + final Table volumeTable = metadataManager.getVolumeTable(); + final OmVolumeArgs omVolumeArgs = volumeTable.get(volumeKey); + + omVolumeArgs.addAcl(ozoneAcl); + + volumeTable.addCacheEntry( + new CacheKey<>(volumeKey), + CacheValue.get(1L, omVolumeArgs)); + } + + public static void addBucketAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + OzoneAcl ozoneAcl) throws IOException { + final String bucketKey = metadataManager.getBucketKey(volume, bucket); + final Table bucketTable = metadataManager.getBucketTable(); + final OmBucketInfo omBucketInfo = bucketTable.get(bucketKey); + + omBucketInfo.addAcl(ozoneAcl); + + bucketTable.addCacheEntry( + new CacheKey<>(bucketKey), + CacheValue.get(1L, omBucketInfo)); + } + + public static void addKeyAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key, + OzoneAcl ozoneAcl + ) throws IOException { + final String objKey = metadataManager.getOzoneKey(volume, bucket, key); + final Table keyTable = metadataManager.getKeyTable(bucketLayout); + final OmKeyInfo omKeyInfo = keyTable.get(objKey); + + omKeyInfo.addAcl(ozoneAcl); + + keyTable.addCacheEntry( + new CacheKey<>(objKey), + CacheValue.get(1L, omKeyInfo)); + } + + public static void setVolumeAcl( + OMMetadataManager metadataManager, + String volume, + List ozoneAcls) throws IOException { + final String volumeKey = metadataManager.getVolumeKey(volume); + final Table volumeTable = metadataManager.getVolumeTable(); + final OmVolumeArgs omVolumeArgs = volumeTable.get(volumeKey); + + omVolumeArgs.setAcls(ozoneAcls); + + volumeTable.addCacheEntry( + new CacheKey<>(volumeKey), + CacheValue.get(1L, omVolumeArgs)); + } + + public static void setBucketAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + List ozoneAcls) throws IOException { + final String bucketKey = metadataManager.getBucketKey(volume, bucket); + final Table bucketTable = metadataManager.getBucketTable(); + final OmBucketInfo omBucketInfo = bucketTable.get(bucketKey); + + omBucketInfo.setAcls(ozoneAcls); + + bucketTable.addCacheEntry( + new CacheKey<>(bucketKey), + CacheValue.get(1L, omBucketInfo)); + } + + public static void setKeyAcl( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key, + List ozoneAcls) 
throws IOException { + final String objKey = metadataManager.getOzoneKey(volume, bucket, key); + final Table keyTable = metadataManager.getKeyTable(bucketLayout); + final OmKeyInfo omKeyInfo = keyTable.get(objKey); + + omKeyInfo.setAcls(ozoneAcls); + + keyTable.addCacheEntry( + new CacheKey<>(objKey), + CacheValue.get(1L, omKeyInfo)); + } + + public static List getVolumeAcls( + OMMetadataManager metadataManager, + String volume + ) throws IOException { + return metadataManager.getVolumeTable() + .get(metadataManager.getVolumeKey(volume)) + .getAcls(); + } + + public static List getBucketAcls( + OMMetadataManager metadataManager, + String volume, + String bucket + ) throws IOException { + return metadataManager.getBucketTable() + .get(metadataManager.getBucketKey(volume, bucket)) + .getAcls(); + } + + public static List getKeyAcls( + OMMetadataManager metadataManager, + String volume, + String bucket, + BucketLayout bucketLayout, + String key + ) throws IOException { + return metadataManager.getKeyTable(bucketLayout) + .get(metadataManager.getOzoneKey(volume, bucket, key)) + .getAcls(); + } + + private OzoneNativeAclTestUtil() { + // utilities + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index f5bb8d35350b..52dea922d53b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -21,8 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.OzoneAdmins; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; @@ -314,45 +312,19 @@ public void testCheckAccessForPrefix( private void setVolumeAcl(List ozoneAcls) throws IOException { - String volumeKey = metadataManager.getVolumeKey(volObj.getVolumeName()); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.setAcls(ozoneAcls); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.setVolumeAcl(metadataManager, vol, ozoneAcls); } private void setBucketAcl(List ozoneAcls) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.setAcls(ozoneAcls); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls); } private void addVolumeAcl(OzoneAcl ozoneAcl) throws IOException { - String volumeKey = metadataManager.getVolumeKey(volObj.getVolumeName()); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.addAcl(ozoneAcl); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, ozoneAcl); } private void addBucketAcl(OzoneAcl ozoneAcl) throws IOException { - 
String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.addAcl(ozoneAcl); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, ozoneAcl); } private void resetAclsAndValidateAccess( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index f17d477bd793..f5220df1783e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.server.OzoneAdmins; -import org.apache.hadoop.hdds.utils.db.cache.CacheKey; -import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; @@ -34,7 +32,6 @@ import org.apache.hadoop.ozone.om.VolumeManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; @@ -265,88 +262,46 @@ private void testParentChild(OzoneObj child, } private void addVolumeAcl(String vol, OzoneAcl ozoneAcl) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.addAcl(ozoneAcl); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.addVolumeAcl(metadataManager, vol, ozoneAcl); } private List getVolumeAcls(String vol) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = - metadataManager.getVolumeTable().get(volumeKey); - - return omVolumeArgs.getAcls(); + return OzoneNativeAclTestUtil.getVolumeAcls(metadataManager, vol); } private void setVolumeAcl(String vol, List ozoneAcls) throws IOException { - String volumeKey = metadataManager.getVolumeKey(vol); - OmVolumeArgs omVolumeArgs = metadataManager.getVolumeTable().get(volumeKey); - - omVolumeArgs.setAcls(ozoneAcls); - - metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(volumeKey), - CacheValue.get(1L, omVolumeArgs)); + OzoneNativeAclTestUtil.setVolumeAcl(metadataManager, vol, ozoneAcls); } private void addKeyAcl(String vol, String buck, String key, OzoneAcl ozoneAcl) throws IOException { - String objKey = metadataManager.getOzoneKey(vol, buck, key); - OmKeyInfo omKeyInfo = - metadataManager.getKeyTable(getBucketLayout()).get(objKey); - - omKeyInfo.addAcl(ozoneAcl); - - metadataManager.getKeyTable(getBucketLayout()) - .addCacheEntry(new CacheKey<>(objKey), - CacheValue.get(1L, omKeyInfo)); + OzoneNativeAclTestUtil.addKeyAcl(metadataManager, vol, buck, getBucketLayout(), key, ozoneAcl); } private void setKeyAcl(String vol, String buck, String key, List ozoneAcls) throws IOException { - String 
objKey = metadataManager.getOzoneKey(vol, buck, key); - OmKeyInfo omKeyInfo = - metadataManager.getKeyTable(getBucketLayout()).get(objKey); - omKeyInfo.setAcls(ozoneAcls); - - metadataManager.getKeyTable(getBucketLayout()) - .addCacheEntry(new CacheKey<>(objKey), - CacheValue.get(1L, omKeyInfo)); + OzoneNativeAclTestUtil.setKeyAcl(metadataManager, vol, buck, getBucketLayout(), key, ozoneAcls); } private void addBucketAcl(String vol, String buck, OzoneAcl ozoneAcl) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.addAcl(ozoneAcl); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.addBucketAcl(metadataManager, vol, buck, ozoneAcl); } private List getBucketAcls(String vol, String buck) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); + return OzoneNativeAclTestUtil.getBucketAcls(metadataManager, vol, buck); + } - return omBucketInfo.getAcls(); + private List getKeyAcls(String vol, String buck, String key) + throws IOException { + return OzoneNativeAclTestUtil.getKeyAcls(metadataManager, vol, buck, getBucketLayout(), key); } private void setBucketAcl(String vol, String buck, List ozoneAcls) throws IOException { - String bucketKey = metadataManager.getBucketKey(vol, buck); - OmBucketInfo omBucketInfo = metadataManager.getBucketTable().get(bucketKey); - - omBucketInfo.setAcls(ozoneAcls); - - metadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey), - CacheValue.get(1L, omBucketInfo)); + OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls); } private static OzoneObjInfo createVolume(String volumeName) From 2710129ce41422c81feeae6d5a21773ccc6707c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 13:50:12 +0100 Subject: [PATCH 088/108] HDDS-10453. Bump httpclient to 4.5.14 (#6311) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1f8af9f6b677..fe1bbb5147e9 100644 --- a/pom.xml +++ b/pom.xml @@ -175,7 +175,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 2.6.1 - 4.5.13 + 4.5.14 4.4.16 From 61dbb0870ae0cc2f1aa824fd4586599566683f9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 15:32:09 +0100 Subject: [PATCH 089/108] HDDS-10455. Bump protobuf-maven-plugin to 0.6.1 (#6313) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fe1bbb5147e9..318ac647db04 100644 --- a/pom.xml +++ b/pom.xml @@ -223,7 +223,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 3.7.2 - 0.5.1 + 0.6.1 3.19.6 1.7.1 From 11c5eb86a4f43c4297a39a496051173e420b9edc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Mar 2024 17:31:38 +0100 Subject: [PATCH 090/108] HDDS-10456. 
Bump slf4j to 2.0.12 (#6312) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 318ac647db04..4de8939083a0 100644 --- a/pom.xml +++ b/pom.xml @@ -179,7 +179,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 4.4.16 - 2.0.10 + 2.0.12 2.17.1 3.4.2 1.2.25 From b513cdcb545bf915eab20ebedf162d7ba2e4304c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 4 Mar 2024 11:44:59 +0100 Subject: [PATCH 091/108] HDDS-10439. Remove setConf from MiniOzoneCluster public interface (#6320) --- .../java/org/apache/hadoop/ozone/MiniOzoneCluster.java | 5 ----- .../java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java | 3 +-- .../ozone/client/rpc/TestOzoneRpcClientAbstract.java | 1 - .../apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java | 1 - .../org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java | 3 --- .../hadoop/ozone/om/TestOmSnapshotDisabledRestart.java | 3 --- .../ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java | 7 ++----- .../hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java | 3 +-- 8 files changed, 4 insertions(+), 22 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index b10b021b69e8..9f689c6e0e4d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -73,11 +73,6 @@ static MiniOzoneHAClusterImpl.Builder newHABuilder(OzoneConfiguration conf) { */ OzoneConfiguration getConf(); - /** - * Set the configuration for the MiniOzoneCluster. 
- */ - void setConf(OzoneConfiguration newConf); - /** * Waits for the cluster to be ready, this call blocks till all the * configured {@link HddsDatanodeService} registers with diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 859ce4740348..6bc80cba15ad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -164,8 +164,7 @@ public OzoneConfiguration getConf() { return conf; } - @Override - public void setConf(OzoneConfiguration newConf) { + protected void setConf(OzoneConfiguration newConf) { this.conf = newConf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index da41561f8ce2..b0d3609651fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -1333,7 +1333,6 @@ public void testMissingParentBucketUsedNamespace(BucketLayout layout) if (layout.equals(BucketLayout.LEGACY)) { OzoneConfiguration conf = cluster.getConf(); conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true); - cluster.setConf(conf); } // the directory "/dir1", ""/dir1/dir2/", "/dir1/dir2/dir3/" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java index ff57a1e7bbe9..da0f82d4707b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java @@ -303,7 +303,6 @@ public void testForceBootstrap() throws Exception { config.setInt(OMConfigKeys.OZONE_OM_ADMIN_PROTOCOL_MAX_RETRIES_KEY, 2); config.setInt( OMConfigKeys.OZONE_OM_ADMIN_PROTOCOL_WAIT_BETWEEN_RETRIES_KEY, 100); - cluster.setConf(config); GenericTestUtils.LogCapturer omLog = GenericTestUtils.LogCapturer.captureLogs(OzoneManager.LOG); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java index df9a4ddfe227..cb49d273e78d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabled.java @@ -64,9 +64,6 @@ public static void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = cluster.getOMLeader(); - OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); store = client.getObjectStore(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java index 
8a5ae0234910..7f325f6c3e0d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java @@ -61,9 +61,6 @@ public static void init() throws Exception { cluster.waitForClusterToBeReady(); client = cluster.newClient(); - OzoneManager leaderOzoneManager = cluster.getOMLeader(); - OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); store = client.getObjectStore(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index 734cf912cb16..5694edd773ea 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -116,10 +116,6 @@ public static void init() throws Exception { .build(); cluster.waitForClusterToBeReady(); - ozoneManager = cluster.getOzoneManager(); - final OzoneConfiguration ozoneManagerConf = ozoneManager.getConfiguration(); - cluster.setConf(ozoneManagerConf); - final String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + omServiceId; final OzoneConfiguration clientConf = new OzoneConfiguration(cluster.getConf()); @@ -128,12 +124,13 @@ public static void init() throws Exception { client = cluster.newClient(); objectStore = client.getObjectStore(); + ozoneManager = cluster.getOzoneManager(); final KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(ozoneManager, "keyManager"); // stop the deletion services so that keys can still be read keyManager.stop(); - OMStorage.getOmDbDir(ozoneManagerConf); + OMStorage.getOmDbDir(cluster.getConf()); } @AfterAll diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index a88290bfb8d8..4cd2f98c2b8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -113,10 +113,9 @@ public void init() throws Exception { leaderOzoneManager = cluster.getOMLeader(); OzoneConfiguration leaderConfig = leaderOzoneManager.getConfiguration(); - cluster.setConf(leaderConfig); String hostPrefix = OZONE_OFS_URI_SCHEME + "://" + serviceID; - clientConf = new OzoneConfiguration(cluster.getConf()); + clientConf = new OzoneConfiguration(leaderConfig); clientConf.set(FS_DEFAULT_NAME_KEY, hostPrefix); client = cluster.newClient(); From a65991fa3325676554efb47fdfd7ccc71a7b4cb9 Mon Sep 17 00:00:00 2001 From: jianghuazhu <740087514@qq.com> Date: Mon, 4 Mar 2024 06:21:51 -0600 Subject: [PATCH 092/108] HDDS-10444. 
Reduce string concatenation in ContainerImporter#importContainer (#6307) --- .../container/replication/ContainerImporter.java | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java index 1929c16089b0..f20094079c9e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ContainerImporter.java @@ -95,18 +95,17 @@ public void importContainer(long containerID, Path tarFilePath, throws IOException { if (!importContainerProgress.add(containerID)) { deleteFileQuietely(tarFilePath); - LOG.warn("Container import in progress with container Id {}", - containerID); - throw new StorageContainerException("Container " + - "import in progress with container Id " + containerID, + String log = "Container import in progress with container Id " + containerID; + LOG.warn(log); + throw new StorageContainerException(log, ContainerProtos.Result.CONTAINER_EXISTS); } try { if (containerSet.getContainer(containerID) != null) { - LOG.warn("Container already exists with container Id {}", containerID); - throw new StorageContainerException("Container already exists " + - "with container Id " + containerID, + String log = "Container already exists with container Id " + containerID; + LOG.warn(log); + throw new StorageContainerException(log, ContainerProtos.Result.CONTAINER_EXISTS); } From 650e77753b50fa4cc52f097be28416f888006635 Mon Sep 17 00:00:00 2001 From: Slava Tutrinov Date: Mon, 4 Mar 2024 15:26:00 +0300 Subject: [PATCH 093/108] HDDS-10459. 
Bump snappy-java to 1.1.10.5 (#6324) Fixes: - CVE-2023-34453 - CVE-2023-34454 - CVE-2023-34455 --- hadoop-hdds/hadoop-dependency-client/pom.xml | 8 ++++++++ hadoop-hdds/hadoop-dependency-server/pom.xml | 8 ++++++++ pom.xml | 6 ++++++ 3 files changed, 22 insertions(+) diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index d2a8372bdd17..f29232090fdf 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -43,6 +43,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + org.xerial.snappy + snappy-java + org.apache.hadoop hadoop-annotations @@ -290,5 +294,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.xerial.snappy + snappy-java + diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index feaf3de5a11a..82e4c33325e2 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -43,6 +43,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} + + org.xerial.snappy + snappy-java + org.apache.curator * @@ -138,5 +142,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.xerial.snappy + snappy-java + diff --git a/pom.xml b/pom.xml index 4de8939083a0..898b675893a3 100644 --- a/pom.xml +++ b/pom.xml @@ -306,6 +306,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.2.1 3.9.6 + 1.1.10.5 @@ -1548,6 +1549,11 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs mockito-inline ${mockito.version} + + org.xerial.snappy + snappy-java + ${snappy-java.version} + From a248ed1ea421770bb22fa3ce58906194f793406c Mon Sep 17 00:00:00 2001 From: jyotirmoy-gh <69982926+jyotirmoy-gh@users.noreply.github.com> Date: Tue, 5 Mar 2024 10:23:06 +0530 Subject: [PATCH 094/108] HDDS-10329. [Snapshot] Add unit-test for recreating snapshots with deleted snapshot names. 
(#6298) --- .../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 83 +++++++++++++++++-- .../ozone/om/snapshot/TestOmSnapshot.java | 59 ++++++++++++- 2 files changed, 132 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index 7afdf7144f05..8e0bd1ac7deb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -24,12 +24,14 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.om.KeyManagerImpl; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -47,9 +49,11 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -66,12 +70,13 @@ class TestOzoneFsSnapshot { private static final String OM_SERVICE_ID = "om-service-test1"; private static OzoneManager ozoneManager; private static OzoneFsShell shell; + private static AtomicInteger counter = new AtomicInteger(); private static final String VOLUME = - "vol-" + RandomStringUtils.randomNumeric(5); + "vol-" + counter.incrementAndGet(); private static final String BUCKET = - "buck-" + RandomStringUtils.randomNumeric(5); + "buck-" + counter.incrementAndGet(); private static final String KEY = - "key-" + RandomStringUtils.randomNumeric(5); + "key-" + counter.incrementAndGet(); private static final String BUCKET_PATH = OM_KEY_PREFIX + VOLUME + OM_KEY_PREFIX + BUCKET; private static final String BUCKET_WITH_SNAPSHOT_INDICATOR_PATH = @@ -84,6 +89,8 @@ static void initClass() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); // Start the cluster cluster = MiniOzoneCluster.newHABuilder(conf) @@ -128,7 +135,7 @@ private static void createVolBuckKey() @Test void testCreateSnapshotDuplicateName() throws Exception { - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); 
int res = ToolRunner.run(shell, new String[]{"-createSnapshot", BUCKET_PATH, snapshotName}); @@ -152,7 +159,7 @@ void testCreateSnapshotWithSubDirInput() throws Exception { // rather than: // Created snapshot ofs://om/vol1/buck2/dir3/.snapshot/snap1 - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); String dirPath = BUCKET_PATH + "/dir1/"; @@ -257,7 +264,7 @@ void testCreateSnapshotFailure(String description, */ @Test void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { - String key1 = "key-" + RandomStringUtils.randomNumeric(5); + String key1 = "key-" + counter.incrementAndGet(); String newKeyPath = BUCKET_PATH + OM_KEY_PREFIX + key1; // Pause SnapshotDeletingService so that Snapshot marked deleted is not reclaimed. ozoneManager.getKeyManager().getSnapshotDeletingService().suspend(); @@ -274,7 +281,7 @@ void testFsLsSnapshot(@TempDir Path tempDir) throws Exception { String snapshotPath1 = BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snapshotName1; - String key2 = "key-" + RandomStringUtils.randomNumeric(5); + String key2 = "key-" + counter.incrementAndGet(); String newKeyPath2 = BUCKET_PATH + OM_KEY_PREFIX + key2; execShellCommandAndGetOutput(0, new String[]{"-put", tempFile.toString(), newKeyPath2}); @@ -413,6 +420,64 @@ void testSnapshotDeleteFailure(String description, assertThat(errorMessage).contains(expectedMessage); } + @Test + public void testSnapshotReuseSnapName() throws Exception { + String key1 = "key-" + counter.incrementAndGet(); + int res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key1}); + assertEquals(0, res); + + String snap1 = "snap" + counter.incrementAndGet(); + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap1}); + // Asserts that create request succeeded + assertEquals(0, res); + + String listSnapOut = execShellCommandAndGetOutput(0, + new String[]{"-ls", BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snap1}); + assertThat(listSnapOut).contains(key1); + + res = ToolRunner.run(shell, + new String[]{"-deleteSnapshot", BUCKET_PATH, snap1}); + // Asserts that delete request succeeded + assertEquals(0, res); + + GenericTestUtils.waitFor(() -> { + try { + return !ozoneManager.getMetadataManager().getSnapshotInfoTable() + .isExist(SnapshotInfo.getTableKey(VOLUME, BUCKET, snap1)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 200, 10000); + + String key2 = "key-" + counter.incrementAndGet(); + res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key2}); + assertEquals(0, res); + String snap2 = "snap" + counter.incrementAndGet(); + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap2}); + // Asserts that create request succeeded + assertEquals(0, res); + + String key3 = "key-" + counter.incrementAndGet(); + res = ToolRunner.run(shell, new String[]{"-touch", + BUCKET_PATH + OM_KEY_PREFIX + key3}); + assertEquals(0, res); + + res = ToolRunner.run(shell, + new String[]{"-createSnapshot", BUCKET_PATH, snap1}); + // Asserts that create request succeeded + assertEquals(0, res); + + listSnapOut = execShellCommandAndGetOutput(0, + new String[]{"-ls", BUCKET_WITH_SNAPSHOT_INDICATOR_PATH + OM_KEY_PREFIX + snap1}); + assertThat(listSnapOut).contains(key1); + assertThat(listSnapOut).contains(key2); + assertThat(listSnapOut).contains(key3); + } + /** * Execute a shell command with provided arguments * and return a string of the output. 
@@ -453,7 +518,7 @@ private String execShellCommandAndGetOutput( } private String createSnapshot() throws Exception { - String snapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String snapshotName = "snap-" + counter.incrementAndGet(); // Create snapshot int res = ToolRunner.run(shell, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 8021b959849e..b4e06c034269 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -102,6 +102,7 @@ import java.util.Iterator; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.regex.Matcher; @@ -111,6 +112,7 @@ import static org.apache.commons.lang3.StringUtils.leftPad; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone; import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -118,6 +120,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_DIFF_DISABLE_NATIVE_LIBS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; @@ -172,6 +175,7 @@ public abstract class TestOmSnapshot { private ObjectStore store; private OzoneManager ozoneManager; private OzoneBucket ozoneBucket; + private OzoneConfiguration conf; private final BucketLayout bucketLayout; private final boolean enabledFileSystemPaths; @@ -196,7 +200,7 @@ public TestOmSnapshot(BucketLayout newBucketLayout, * Create a MiniDFSCluster for testing. 
*/ private void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); + conf = new OzoneConfiguration(); conf.setBoolean(OZONE_OM_ENABLE_FILESYSTEM_PATHS, enabledFileSystemPaths); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); conf.setBoolean(OZONE_OM_SNAPSHOT_FORCE_FULL_DIFF, forceFullSnapshotDiff); @@ -209,6 +213,8 @@ private void init() throws Exception { // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); conf.setInt(OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION, OMLayoutFeature.BUCKET_LAYOUT_SUPPORT.layoutVersion()); + conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + conf.setInt(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, KeyManagerImpl.DISABLE_VALUE); cluster = MiniOzoneCluster.newBuilder(conf) .build(); @@ -236,6 +242,12 @@ private void stopKeyManager() throws IOException { keyManager.stop(); } + private void startKeyManager() throws IOException { + KeyManagerImpl keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils + .getInternalState(ozoneManager, "keyManager"); + keyManager.start(conf); + } + private RDBStore getRdbStore() { return (RDBStore) ozoneManager.getMetadataManager().getStore(); } @@ -2481,4 +2493,49 @@ public void testSnapshotCompactionDag() throws Exception { fetchReportPage(volume1, bucket3, "bucket3-snap1", "bucket3-snap3", null, 0).getDiffList().size()); } + + @Test + public void testSnapshotReuseSnapName() throws Exception { + // start KeyManager for this test + startKeyManager(); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); + store.createVolume(volume); + OzoneVolume volume1 = store.getVolume(volume); + volume1.createBucket(bucket); + OzoneBucket bucket1 = volume1.getBucket(bucket); + // Create Key1 and take snapshot + String key1 = "key-1-"; + createFileKeyWithPrefix(bucket1, key1); + String snap1 = "snap" + counter.incrementAndGet(); + String snapshotKeyPrefix = createSnapshot(volume, bucket, snap1); + + int keyCount1 = keyCount(bucket1, snapshotKeyPrefix + "key-"); + assertEquals(1, keyCount1); + + store.deleteSnapshot(volume, bucket, snap1); + + GenericTestUtils.waitFor(() -> { + try { + return !ozoneManager.getMetadataManager().getSnapshotInfoTable() + .isExist(SnapshotInfo.getTableKey(volume, bucket, snap1)); + } catch (IOException e) { + throw new RuntimeException(e); + } + }, 200, 10000); + + createFileKeyWithPrefix(bucket1, key1); + String snap2 = "snap" + counter.incrementAndGet(); + createSnapshot(volume, bucket, snap2); + + String key2 = "key-2-"; + createFileKeyWithPrefix(bucket1, key2); + createSnapshot(volume, bucket, snap1); + + int keyCount2 = keyCount(bucket1, snapshotKeyPrefix + "key-"); + assertEquals(3, keyCount2); + + // Stop key manager after testcase executed + stopKeyManager(); + } } From ff1e4143ad22ae56a9476c180bb56cdb3556c487 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 5 Mar 2024 09:50:50 +0100 Subject: [PATCH 095/108] HDDS-10430. 
Race condition around Pipeline#nodesInOrder (#6316) --- .../common/helpers/AllocatedBlock.java | 14 +- .../hadoop/hdds/scm/pipeline/Pipeline.java | 120 ++++++++---------- .../scm/server/SCMBlockProtocolServer.java | 9 +- .../hdds/scm/TestXceiverClientGrpc.java | 2 +- .../hadoop/ozone/om/KeyManagerImpl.java | 4 +- 5 files changed, 79 insertions(+), 70 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java index 7ac0401af117..5a1d8f90ea84 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java @@ -26,8 +26,8 @@ * contains a Pipeline and the key. */ public final class AllocatedBlock { - private Pipeline pipeline; - private ContainerBlockID containerBlockID; + private final Pipeline pipeline; + private final ContainerBlockID containerBlockID; /** * Builder for AllocatedBlock. @@ -63,4 +63,14 @@ public Pipeline getPipeline() { public ContainerBlockID getBlockID() { return containerBlockID; } + + public static Builder newBuilder() { + return new Builder(); + } + + public Builder toBuilder() { + return new Builder() + .setContainerBlockID(containerBlockID) + .setPipeline(pipeline); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 9d95cee48366..05d83a8b8b56 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -34,6 +34,8 @@ import java.util.UUID; import com.fasterxml.jackson.annotation.JsonIgnore; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import org.apache.commons.lang3.StringUtils; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; @@ -76,10 +78,10 @@ public static Codec getCodec() { private final ReplicationConfig replicationConfig; private final PipelineState state; - private Map nodeStatus; - private Map replicaIndexes; + private final Map nodeStatus; + private final Map replicaIndexes; // nodes with ordered distance to client - private List nodesInOrder = new ArrayList<>(); + private final ImmutableList nodesInOrder; // Current reported Leader for the pipeline private UUID leaderId; // Timestamp for pipeline upon creation @@ -103,17 +105,17 @@ public static Codec getCodec() { * set to Instant.now when you crate the Pipeline object as part of * state change. */ - private Pipeline(PipelineID id, - ReplicationConfig replicationConfig, PipelineState state, - Map nodeStatus, UUID suggestedLeaderId) { - this.id = id; - this.replicationConfig = replicationConfig; - this.state = state; - this.nodeStatus = nodeStatus; - this.creationTimestamp = Instant.now(); - this.suggestedLeaderId = suggestedLeaderId; - this.replicaIndexes = new HashMap<>(); - this.stateEnterTime = Instant.now(); + private Pipeline(Builder b) { + id = b.id; + replicationConfig = b.replicationConfig; + state = b.state; + leaderId = b.leaderId; + suggestedLeaderId = b.suggestedLeaderId; + nodeStatus = b.nodeStatus; + nodesInOrder = b.nodesInOrder != null ? 
ImmutableList.copyOf(b.nodesInOrder) : ImmutableList.of(); + replicaIndexes = b.replicaIndexes != null ? ImmutableMap.copyOf(b.replicaIndexes) : ImmutableMap.of(); + creationTimestamp = b.creationTimestamp != null ? b.creationTimestamp : Instant.now(); + stateEnterTime = Instant.now(); } /** @@ -310,19 +312,6 @@ public boolean isOpen() { return state == PipelineState.OPEN; } - public boolean isAllocationTimeout() { - //TODO: define a system property to control the timeout value - return false; - } - - public void setNodesInOrder(List nodes) { - nodesInOrder.clear(); - if (null == nodes) { - return; - } - nodesInOrder.addAll(nodes); - } - public List getNodesInOrder() { if (nodesInOrder.isEmpty()) { LOG.debug("Nodes in order is empty, delegate to getNodes"); @@ -406,33 +395,39 @@ public HddsProtos.Pipeline getProtobufMessage(int clientVersion) // To save the message size on wire, only transfer the node order based on // network topology - List nodes = nodesInOrder; - if (!nodes.isEmpty()) { - for (int i = 0; i < nodes.size(); i++) { + if (!nodesInOrder.isEmpty()) { + for (int i = 0; i < nodesInOrder.size(); i++) { Iterator it = nodeStatus.keySet().iterator(); for (int j = 0; j < nodeStatus.keySet().size(); j++) { - if (it.next().equals(nodes.get(i))) { + if (it.next().equals(nodesInOrder.get(i))) { builder.addMemberOrders(j); break; } } } if (LOG.isDebugEnabled()) { - LOG.debug("Serialize pipeline {} with nodesInOrder {}", id, nodes); + LOG.debug("Serialize pipeline {} with nodesInOrder {}", id, nodesInOrder); } } return builder.build(); } - static Pipeline getFromProtobufSetCreationTimestamp( + private static Pipeline getFromProtobufSetCreationTimestamp( HddsProtos.Pipeline proto) throws UnknownPipelineStateException { - final Pipeline pipeline = getFromProtobuf(proto); - // When SCM is restarted, set Creation time with current time. - pipeline.setCreationTimestamp(Instant.now()); - return pipeline; + return toBuilder(proto) + .setCreateTimestamp(Instant.now()) + .build(); } - public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) + public Pipeline copyWithNodesInOrder(List nodes) { + return toBuilder().setNodesInOrder(nodes).build(); + } + + public Builder toBuilder() { + return newBuilder(this); + } + + public static Builder toBuilder(HddsProtos.Pipeline pipeline) throws UnknownPipelineStateException { Preconditions.checkNotNull(pipeline, "Pipeline is null"); @@ -473,9 +468,13 @@ public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) .setReplicaIndexes(nodes) .setLeaderId(leaderId) .setSuggestedLeaderId(suggestedLeaderId) - .setNodesInOrder(pipeline.getMemberOrdersList()) - .setCreateTimestamp(pipeline.getCreationTimeStamp()) - .build(); + .setNodeOrder(pipeline.getMemberOrdersList()) + .setCreateTimestamp(pipeline.getCreationTimeStamp()); + } + + public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) + throws UnknownPipelineStateException { + return toBuilder(pipeline).build(); } @Override @@ -529,10 +528,6 @@ public static Builder newBuilder(Pipeline pipeline) { return new Builder(pipeline); } - private void setReplicaIndexes(Map replicaIndexes) { - this.replicaIndexes = replicaIndexes; - } - /** * Builder class for Pipeline. 
*/ @@ -546,7 +541,7 @@ public static class Builder { private UUID leaderId = null; private Instant creationTimestamp = null; private UUID suggestedLeaderId = null; - private Map replicaIndexes = new HashMap<>(); + private Map replicaIndexes; public Builder() { } @@ -559,8 +554,8 @@ public Builder(Pipeline pipeline) { this.leaderId = pipeline.getLeaderId(); this.creationTimestamp = pipeline.getCreationTimestamp(); this.suggestedLeaderId = pipeline.getSuggestedLeaderId(); - this.replicaIndexes = new HashMap<>(); if (nodeStatus != null) { + replicaIndexes = new HashMap<>(); for (DatanodeDetails dn : nodeStatus.keySet()) { int index = pipeline.getReplicaIndex(dn); if (index > 0) { @@ -601,11 +596,22 @@ public Builder setNodes(List nodes) { return this; } - public Builder setNodesInOrder(List orders) { + public Builder setNodeOrder(List orders) { + // for build from ProtoBuf this.nodeOrder = orders; return this; } + public Builder setNodesInOrder(List nodes) { + this.nodesInOrder = new LinkedList<>(nodes); + return this; + } + + public Builder setCreateTimestamp(Instant instant) { + this.creationTimestamp = instant; + return this; + } + public Builder setCreateTimestamp(long createTimestamp) { this.creationTimestamp = Instant.ofEpochMilli(createTimestamp); return this; @@ -627,19 +633,8 @@ public Pipeline build() { Preconditions.checkNotNull(replicationConfig); Preconditions.checkNotNull(state); Preconditions.checkNotNull(nodeStatus); - Pipeline pipeline = - new Pipeline(id, replicationConfig, state, nodeStatus, - suggestedLeaderId); - pipeline.setLeaderId(leaderId); - // overwrite with original creationTimestamp - if (creationTimestamp != null) { - pipeline.setCreationTimestamp(creationTimestamp); - } - - pipeline.setReplicaIndexes(replicaIndexes); if (nodeOrder != null && !nodeOrder.isEmpty()) { - // This branch is for build from ProtoBuf List nodesWithOrder = new ArrayList<>(); for (int i = 0; i < nodeOrder.size(); i++) { int nodeIndex = nodeOrder.get(i); @@ -657,13 +652,10 @@ public Pipeline build() { LOG.debug("Deserialize nodesInOrder {} in pipeline {}", nodesWithOrder, id); } - pipeline.setNodesInOrder(nodesWithOrder); - } else if (nodesInOrder != null) { - // This branch is for pipeline clone - pipeline.setNodesInOrder(nodesInOrder); + nodesInOrder = nodesWithOrder; } - return pipeline; + return new Pipeline(this); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 69f190c7fbd8..0747f04584bd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -27,6 +27,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; @@ -203,15 +204,19 @@ public List allocateBlock( AllocatedBlock block = scm.getScmBlockManager() .allocateBlock(size, replicationConfig, owner, excludeList); if (block != null) { - blocks.add(block); // Sort the datanodes if client machine is specified final Node client = getClientNode(clientMachine); if (client != null) { final List nodes = block.getPipeline().getNodes(); final List sorted = scm.getClusterMap() .sortByDistanceCost(client, nodes, nodes.size()); - block.getPipeline().setNodesInOrder(sorted); + 
if (!Objects.equals(sorted, block.getPipeline().getNodesInOrder())) { + block = block.toBuilder() + .setPipeline(block.getPipeline().copyWithNodesInOrder(sorted)) + .build(); + } } + blocks.add(block); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java index fb312dfb5096..79c937ceb58b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestXceiverClientGrpc.java @@ -71,8 +71,8 @@ public void setup() { RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) .setState(Pipeline.PipelineState.CLOSED) .setNodes(dns) + .setNodesInOrder(dnsInOrder) .build(); - pipeline.setNodesInOrder(dnsInOrder); } @Test diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 3786601dd63a..af6b41b6100b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -1880,7 +1880,9 @@ void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { LOG.debug("Found sorted datanodes for pipeline {} and client {} " + "in cache", pipeline.getId(), clientMachine); } - pipeline.setNodesInOrder(sortedNodes); + if (!Objects.equals(pipeline.getNodesInOrder(), sortedNodes)) { + k.setPipeline(pipeline.copyWithNodesInOrder(sortedNodes)); + } } } } From f7a421b27bde7cce83ba18a35b0658b9ab106ba5 Mon Sep 17 00:00:00 2001 From: Andrei Mikhalev <4503006+Montura@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:32:30 +0300 Subject: [PATCH 096/108] HDDS-10446. Refactor Node2ObjectsMap, Node2PipelineMap, Node2ContainerMap (#6303) --- .../scm/node/states/Node2ContainerMap.java | 92 ---------------- .../scm/node/states/Node2PipelineMap.java | 28 +++-- .../hdds/scm/container/MockNodeManager.java | 1 - .../scm/container/Node2ContainerMap.java} | 101 +++++++++--------- .../TestNode2ContainerMap.java | 6 +- 5 files changed, 72 insertions(+), 156 deletions(-) delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java rename hadoop-hdds/server-scm/src/{main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java => test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java} (63%) rename hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/{node/states => container}/TestNode2ContainerMap.java (99%) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java deleted file mode 100644 index c0f46f15fe20..000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ContainerMap.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.hdds.scm.node.states; - -import java.util.HashSet; -import java.util.Set; -import java.util.UUID; - -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .NO_SUCH_DATANODE; - -/** - * This data structure maintains the list of containers that is on a datanode. - * This information is built from the DN container reports. - */ -public class Node2ContainerMap extends Node2ObjectsMap { - - /** - * Constructs a Node2ContainerMap Object. - */ - public Node2ContainerMap() { - super(); - } - - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - public Set getContainers(UUID datanode) { - return getObjects(datanode); - } - - /** - * Insert a new datanode into Node2Container Map. - * - * @param datanodeID -- Datanode UUID - * @param containerIDs - List of ContainerIDs. - */ - @Override - public void insertNewDatanode(UUID datanodeID, Set containerIDs) - throws SCMException { - super.insertNewDatanode(datanodeID, containerIDs); - } - - /** - * Updates the Container list of an existing DN. - * - * @param datanodeID - UUID of DN. - * @param containers - Set of Containers tht is present on DN. - * @throws SCMException - if we don't know about this datanode, for new DN - * use addDatanodeInContainerMap. - */ - public void setContainersForDatanode(UUID datanodeID, - Set containers) throws SCMException { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(containers); - if (dn2ObjectMap - .computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) - == null) { - throw new SCMException("No such datanode", NO_SUCH_DATANODE); - } - } - - @VisibleForTesting - @Override - public int size() { - return dn2ObjectMap.size(); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java index 6533cb807642..35107829f883 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java @@ -18,10 +18,14 @@ package org.apache.hadoop.hdds.scm.node.states; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; @@ -34,11 +38,13 @@ *

TODO: this information needs to be regenerated from pipeline reports * on SCM restart */ -public class Node2PipelineMap extends Node2ObjectsMap { +public class Node2PipelineMap { + private final Map> dn2PipelineMap = new ConcurrentHashMap<>(); - /** Constructs a Node2PipelineMap Object. */ + /** + * Constructs a Node2PipelineMap Object. + */ public Node2PipelineMap() { - super(); } /** @@ -47,17 +53,19 @@ public Node2PipelineMap() { * @param datanode - UUID * @return Set of pipelines or Null. */ - public Set getPipelines(UUID datanode) { - return getObjects(datanode); + public Set getPipelines(@Nonnull UUID datanode) { + final Set s = dn2PipelineMap.get(datanode); + return s != null ? new HashSet<>(s) : Collections.emptySet(); } /** * Return 0 if there are no pipelines associated with this datanode ID. + * * @param datanode - UUID * @return Number of pipelines or 0. */ public int getPipelinesCount(UUID datanode) { - return getObjects(datanode).size(); + return getPipelines(datanode).size(); } /** @@ -65,18 +73,18 @@ public int getPipelinesCount(UUID datanode) { * * @param pipeline Pipeline to be added */ - public synchronized void addPipeline(Pipeline pipeline) { + public void addPipeline(Pipeline pipeline) { for (DatanodeDetails details : pipeline.getNodes()) { UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) + dn2PipelineMap.computeIfAbsent(dnId, k -> ConcurrentHashMap.newKeySet()) .add(pipeline.getId()); } } - public synchronized void removePipeline(Pipeline pipeline) { + public void removePipeline(Pipeline pipeline) { for (DatanodeDetails details : pipeline.getNodes()) { UUID dnId = details.getUuid(); - dn2ObjectMap.computeIfPresent(dnId, + dn2PipelineMap.computeIfPresent(dnId, (k, v) -> { v.remove(pipeline.getId()); return v; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 84f3684ab7cc..21c3f1c9a8ab 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hdds.scm.node.DatanodeUsageInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeStatus; -import org.apache.hadoop.hdds.scm.node.states.Node2ContainerMap; import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java similarity index 63% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java index 5269a7aaeb3e..507eb75c5d78 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java @@ -16,37 +16,47 @@ * */ -package org.apache.hadoop.hdds.scm.node.states; +package org.apache.hadoop.hdds.scm.container; -import 
com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.states.ReportResult; -import java.util.UUID; -import java.util.Set; +import java.util.Collections; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.TreeSet; -import java.util.HashSet; -import java.util.Collections; - +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE; /** * This data structure maintains the list of containers that is on a datanode. * This information is built from the DN container reports. */ -public class Node2ObjectsMap { +class Node2ContainerMap { + private final Map> dn2ContainerMap = new ConcurrentHashMap<>(); - @SuppressWarnings("visibilitymodifier") - protected final Map> dn2ObjectMap; /** * Constructs a Node2ContainerMap Object. */ - public Node2ObjectsMap() { - dn2ObjectMap = new ConcurrentHashMap<>(); + Node2ContainerMap() { + super(); + } + + /** + * Returns null if there no containers associated with this datanode ID. + * + * @param datanode - UUID + * @return Set of containers or Null. + */ + public @Nonnull Set getContainers(@Nonnull UUID datanode) { + final Set s = dn2ContainerMap.get(datanode); + return s != null ? new HashSet<>(s) : Collections.emptySet(); } /** @@ -56,9 +66,8 @@ public Node2ObjectsMap() { * @param datanodeID - UUID of the Datanode. * @return True if this is tracked, false if this map does not know about it. */ - public boolean isKnownDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - return dn2ObjectMap.containsKey(datanodeID); + public boolean isKnownDatanode(@Nonnull UUID datanodeID) { + return dn2ContainerMap.containsKey(datanodeID); } /** @@ -67,15 +76,10 @@ public boolean isKnownDatanode(UUID datanodeID) { * @param datanodeID -- Datanode UUID * @param containerIDs - List of ContainerIDs. */ - @VisibleForTesting - public void insertNewDatanode(UUID datanodeID, Set containerIDs) + public void insertNewDatanode(@Nonnull UUID datanodeID, @Nonnull Set containerIDs) throws SCMException { - Preconditions.checkNotNull(containerIDs); - Preconditions.checkNotNull(datanodeID); - if (dn2ObjectMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) - != null) { - throw new SCMException("Node already exists in the map", - DUPLICATE_DATANODE); + if (dn2ContainerMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) != null) { + throw new SCMException("Node already exists in the map", DUPLICATE_DATANODE); } } @@ -84,32 +88,15 @@ public void insertNewDatanode(UUID datanodeID, Set containerIDs) * * @param datanodeID - Datanode ID. */ - @VisibleForTesting - public void removeDatanode(UUID datanodeID) { - Preconditions.checkNotNull(datanodeID); - dn2ObjectMap.computeIfPresent(datanodeID, (k, v) -> null); + public void removeDatanode(@Nonnull UUID datanodeID) { + dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null); } - /** - * Returns null if there no containers associated with this datanode ID. - * - * @param datanode - UUID - * @return Set of containers or Null. - */ - Set getObjects(UUID datanode) { - Preconditions.checkNotNull(datanode); - final Set s = dn2ObjectMap.get(datanode); - return s != null ? 
new HashSet<>(s) : Collections.emptySet(); - } - - public ReportResult.ReportResultBuilder newBuilder() { + public @Nonnull ReportResult.ReportResultBuilder newBuilder() { return new ReportResult.ReportResultBuilder<>(); } - public ReportResult processReport(UUID datanodeID, Set objects) { - Preconditions.checkNotNull(datanodeID); - Preconditions.checkNotNull(objects); - + public @Nonnull ReportResult processReport(@Nonnull UUID datanodeID, @Nonnull Set objects) { if (!isKnownDatanode(datanodeID)) { return newBuilder() .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND) @@ -118,11 +105,11 @@ public ReportResult processReport(UUID datanodeID, Set objects) { } // Conditions like Zero length containers should be handled by removeAll. - Set currentSet = dn2ObjectMap.get(datanodeID); - TreeSet newObjects = new TreeSet<>(objects); + Set currentSet = dn2ContainerMap.get(datanodeID); + TreeSet newObjects = new TreeSet<>(objects); newObjects.removeAll(currentSet); - TreeSet missingObjects = new TreeSet<>(currentSet); + TreeSet missingObjects = new TreeSet<>(currentSet); missingObjects.removeAll(objects); if (newObjects.isEmpty() && missingObjects.isEmpty()) { @@ -159,8 +146,22 @@ public ReportResult processReport(UUID datanodeID, Set objects) { .build(); } - @VisibleForTesting + /** + * Updates the Container list of an existing DN. + * + * @param datanodeID - UUID of DN. + * @param containers - Set of Containers tht is present on DN. + * @throws SCMException - if we don't know about this datanode, for new DN + * use addDatanodeInContainerMap. + */ + public void setContainersForDatanode(@Nonnull UUID datanodeID, @Nonnull Set containers) + throws SCMException { + if (dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) == null) { + throw new SCMException("No such datanode", NO_SUCH_DATANODE); + } + } + public int size() { - return dn2ObjectMap.size(); + return dn2ContainerMap.size(); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java similarity index 99% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java index 0aab0aeca837..92e0a2c494f5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java @@ -17,10 +17,10 @@ * */ -package org.apache.hadoop.hdds.scm.node.states; +package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.node.states.ReportResult; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -32,9 +32,9 @@ import java.util.concurrent.ConcurrentHashMap; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.assertFalse; /** * Test classes for Node2ContainerMap. 
From 80592135edffbcec2087edfd70c3b31b70841ebc Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Tue, 5 Mar 2024 16:04:33 +0530 Subject: [PATCH 097/108] HDDS-10458. Mention `ozone admin datanode status decommission` in docs (#6322) --- hadoop-hdds/docs/content/feature/Decommission.md | 8 ++++++++ hadoop-hdds/docs/content/feature/Decommission.zh.md | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/hadoop-hdds/docs/content/feature/Decommission.md b/hadoop-hdds/docs/content/feature/Decommission.md index 86a345a460be..8058c0c0902e 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.md +++ b/hadoop-hdds/docs/content/feature/Decommission.md @@ -51,6 +51,14 @@ ozone admin datanode decommission [-hV] [-id=] ``` You can enter multiple hosts to decommission multiple datanodes together. +To view the status of a decommissioning datanode, you can execute the following command: + +```shell +ozone admin datanode status decommission [-hV] [-id=] [--scm=] [--id=] [--ip=] +``` +You can pass the IP address or UUID of one datanode to view only the details related to that datanode. + + **Note:** To recommission a datanode you may execute the below command in cli, ```shell ozone admin datanode recommission [-hV] [-id=] diff --git a/hadoop-hdds/docs/content/feature/Decommission.zh.md b/hadoop-hdds/docs/content/feature/Decommission.zh.md index ad959469b953..231539fe0d1b 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.zh.md +++ b/hadoop-hdds/docs/content/feature/Decommission.zh.md @@ -50,6 +50,14 @@ ozone admin datanode decommission [-hV] [-id=] ``` 您可以输入多个主机,以便一起Decommission多个DataNode。 +查看 Decommission时datanode 的状态,可以执行下面的命令, + +```shell +ozone admin datanode status decommission [-hV] [-id=] [--scm=] [--id=] [--ip=] +``` +您可以指定一个 Datanode 的 IP address 或 UUID 以查看该 Datanode 相关的详细信息。 + + **Note:** 要Recommission某台DataNode的时候,可在命令行执行以下命令, ```shell ozone admin datanode recommission [-hV] [-id=] From b69674c7f89a92ab2ad066548495e42482bd97c2 Mon Sep 17 00:00:00 2001 From: Ivan Andika <36403683+ivandika3@users.noreply.github.com> Date: Tue, 5 Mar 2024 19:56:16 +0800 Subject: [PATCH 098/108] HDDS-10412. 
Prefix ACL check needs to resolve the bucket link (#6268) --- .../ozone/security/acl/OzoneObjInfo.java | 10 +++ .../dist/src/main/smoketest/basic/links.robot | 15 ++++ .../hadoop/ozone/om/TestOzoneManagerHA.java | 11 +++ .../om/TestOzoneManagerHAWithAllRunning.java | 86 ++++++++++++++++++- .../hadoop/ozone/om/OMMetadataManager.java | 1 - .../hadoop/ozone/om/BucketManagerImpl.java | 3 +- .../hadoop/ozone/om/KeyManagerImpl.java | 6 -- .../hadoop/ozone/om/OmMetadataReader.java | 2 +- .../hadoop/ozone/om/OmSnapshotManager.java | 2 +- .../apache/hadoop/ozone/om/OzoneManager.java | 2 +- .../hadoop/ozone/om/PrefixManagerImpl.java | 54 +++++++++--- .../hadoop/ozone/om/ResolvedBucket.java | 11 +++ .../key/acl/prefix/OMPrefixAclRequest.java | 86 ++++++++++--------- .../key/acl/prefix/OMPrefixAddAclRequest.java | 29 +++---- .../acl/prefix/OMPrefixRemoveAclRequest.java | 21 +++-- .../key/acl/prefix/OMPrefixSetAclRequest.java | 21 +++-- .../request/key/TestOMKeyCreateRequest.java | 2 +- .../request/key/TestOMPrefixAclRequest.java | 8 +- .../acl/prefix/TestOMPrefixAclResponse.java | 23 ++++- 19 files changed, 281 insertions(+), 112 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 09c8743137d4..ca32c96855dd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -186,6 +186,16 @@ public static Builder fromKeyArgs(OmKeyArgs args) { .setResType(ResourceType.KEY); } + public static Builder fromOzoneObj(OzoneObj obj) { + return new Builder() + .setVolumeName(obj.getVolumeName()) + .setBucketName(obj.getBucketName()) + .setKeyName(obj.getKeyName()) + .setResType(obj.getResourceType()) + .setStoreType(obj.getStoreType()) + .setOzonePrefixPath(obj.getOzonePrefixPathViewer()); + } + public Builder setResType(OzoneObj.ResourceType res) { this.resType = res; return this; diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/links.robot b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot index 09437aca24ff..d4d53c8d3cc6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/links.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/links.robot @@ -171,6 +171,21 @@ Source and target key have same ACLs Verify ACL key ${target}/link1/key1 GROUP group2 READ Verify ACL key ${source}/bucket1/key1 GROUP group2 READ +Source and target prefix have same ACLs + Execute ozone sh prefix addacl --acl user:user1:rwxy ${source}/bucket1/prefix1/ + Verify ACL prefix ${target}/link1/prefix1/ USER user1 READ WRITE READ_ACL WRITE_ACL + Verify ACL prefix ${source}/bucket1/prefix1/ USER user1 READ WRITE READ_ACL WRITE_ACL + Execute ozone sh prefix removeacl --acl user:user1:y ${target}/link1/prefix1/ + Verify ACL prefix ${target}/link1/prefix1/ USER user1 READ WRITE READ_ACL + Verify ACL prefix ${source}/bucket1/prefix1/ USER user1 READ WRITE READ_ACL + Execute ozone sh prefix setacl --acl user:user1:rw ${source}/bucket1/prefix1/ + Verify ACL prefix ${target}/link1/prefix1/ USER user1 READ WRITE + Verify ACL prefix ${source}/bucket1/prefix1/ USER user1 READ WRITE + + Execute ozone sh prefix addacl --acl group:group2:r ${source}/bucket1/prefix1/ + Verify ACL prefix ${target}/link1/prefix1/ GROUP group2 READ + Verify ACL prefix ${source}/bucket1/prefix1/ GROUP group2 READ + Buckets and links share namespace Execute ozone sh 
bucket link ${source}/bucket2 ${target}/link2 ${result} = Execute And Ignore Error ozone sh bucket create ${target}/link2 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 2c6ab49b210d..ba0dabf47dd5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.om.ha.HadoopRpcOMFailoverProxyProvider; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServerConfig; +import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; @@ -47,6 +48,7 @@ import java.io.IOException; import java.net.ConnectException; import java.time.Duration; +import java.util.Collections; import java.util.Iterator; import java.util.UUID; import java.util.HashMap; @@ -61,6 +63,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.assertj.core.api.Assertions.assertThat; @@ -212,6 +215,14 @@ public static void createKey(OzoneBucket ozoneBucket, String keyName) throws IOE ozoneOutputStream.close(); } + public static String createPrefixName() { + return "prefix" + RandomStringUtils.randomNumeric(5) + OZONE_URI_DELIMITER; + } + + public static void createPrefix(OzoneObj prefixObj) throws IOException { + assertTrue(objectStore.setAcl(prefixObj, Collections.emptyList())); + } + protected OzoneBucket setupBucket() throws Exception { String userName = "user" + RandomStringUtils.randomNumeric(5); String adminName = "admin" + RandomStringUtils.randomNumeric(5); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java index fbf80a8a879f..8b63136adf62 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java @@ -70,6 +70,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PARTIAL_DELETE; +import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.GROUP; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE; @@ -860,6 +861,79 @@ void testLinkBucketSetKeyAcl() throws Exception { } + @Test + void testLinkBucketAddPrefixAcl() throws Exception { + OzoneBucket srcBucket = 
setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user1 = "remoteUser1"; + OzoneAcl acl1 = new OzoneAcl(USER, user1, READ, DEFAULT); + testAddAcl(user1, linkObj, acl1); // case1: set link acl + assertEqualsAcls(srcObj, linkObj); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + testAddAcl(user2, srcObj, acl2); // case2: set src acl + assertEqualsAcls(srcObj, linkObj); + + } + + @Test + void testLinkBucketRemovePrefixAcl() throws Exception { + + // CASE 1: from link bucket + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user = "remoteUser1"; + OzoneAcl acl = new OzoneAcl(USER, user, READ, DEFAULT); + testRemoveAcl(user, linkObj, acl); + assertEqualsAcls(srcObj, linkObj); + + // CASE 2: from src bucket + OzoneBucket srcBucket2 = setupBucket(); + OzoneBucket linkedBucket2 = linkBucket(srcBucket2); + String prefix2 = createPrefixName(); + OzoneObj linkObj2 = buildPrefixObj(linkedBucket2, prefix2); + OzoneObj srcObj2 = buildPrefixObj(srcBucket2, prefix2); + createPrefix(srcObj2); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + testRemoveAcl(user2, srcObj2, acl2); + assertEqualsAcls(srcObj2, linkObj2); + + } + + @Test + void testLinkBucketSetPrefixAcl() throws Exception { + OzoneBucket srcBucket = setupBucket(); + OzoneBucket linkedBucket = linkBucket(srcBucket); + String prefix = createPrefixName(); + OzoneObj linkObj = buildPrefixObj(linkedBucket, prefix); + OzoneObj srcObj = buildPrefixObj(srcBucket, prefix); + createPrefix(linkObj); + + String user1 = "remoteUser1"; + OzoneAcl acl1 = new OzoneAcl(USER, user1, READ, DEFAULT); + testSetAcl(user1, linkObj, acl1); // case1: set link acl + assertEqualsAcls(srcObj, linkObj); + + String user2 = "remoteUser2"; + OzoneAcl acl2 = new OzoneAcl(USER, user2, READ, DEFAULT); + testSetAcl(user2, srcObj, acl2); // case2: set src acl + assertEqualsAcls(srcObj, linkObj); + + } + private OzoneObj buildBucketObj(OzoneBucket bucket) { return OzoneObjInfo.Builder.newBuilder() .setResType(OzoneObj.ResourceType.BUCKET) @@ -990,8 +1064,16 @@ private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, OzoneAcl userAcl) throws Exception { ObjectStore objectStore = getObjectStore(); - // As by default create will add some default acls in RpcClient. - List acls = objectStore.getAcl(ozoneObj); + // Other than prefix, by default create will add some default acls in RpcClient. 
+ List acls; + if (ozoneObj.getResourceType().equals(OzoneObj.ResourceType.PREFIX)) { + objectStore.addAcl(ozoneObj, userAcl); + // Add another arbitrary group ACL since the prefix will be removed when removing + // the last ACL for the prefix and PREFIX_NOT_FOUND will be thrown + OzoneAcl groupAcl = new OzoneAcl(GROUP, "arbitrary-group", READ, ACCESS); + objectStore.addAcl(ozoneObj, groupAcl); + } + acls = objectStore.getAcl(ozoneObj); assertTrue(acls.size() > 0); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 9651c16175a9..0a2d258c7db4 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -122,7 +122,6 @@ public interface OMMetadataManager extends DBStoreHAManager { * @param key - key name * @return DB key as String. */ - String getOzoneKey(String volume, String bucket, String key); /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 5bc894b2b922..68429c36d084 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -154,7 +154,8 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) && context.getAclRights() != ACLType.READ); if (bucketNeedResolved || - ozObject.getResourceType() == OzoneObj.ResourceType.KEY) { + ozObject.getResourceType() == OzoneObj.ResourceType.KEY || + ozObject.getResourceType() == OzoneObj.ResourceType.PREFIX) { try { ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index af6b41b6100b..ffe1908c6852 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -923,12 +923,6 @@ private String getPartName(PartKeyInfo partKeyInfo, String volName, return partName; } - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. 
- */ @Override public List getAcl(OzoneObj obj) throws IOException { validateOzoneObj(obj); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java index 7c332788d28a..84a5148720b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataReader.java @@ -387,7 +387,7 @@ public List getAcl(OzoneObj obj) throws IOException { String volumeName = obj.getVolumeName(); String bucketName = obj.getBucketName(); String keyName = obj.getKeyName(); - if (obj.getResourceType() == ResourceType.KEY) { + if (obj.getResourceType() == ResourceType.KEY || obj.getResourceType() == ResourceType.PREFIX) { ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( Pair.of(volumeName, bucketName)); volumeName = resolvedBucket.realVolume(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index eb37e399dfe6..602620743b0b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -372,7 +372,7 @@ public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { try { // create the other manager instances based on snapshot // metadataManager - PrefixManagerImpl pm = new PrefixManagerImpl(snapshotMetadataManager, + PrefixManagerImpl pm = new PrefixManagerImpl(ozoneManager, snapshotMetadataManager, false); KeyManagerImpl km = new KeyManagerImpl(ozoneManager, ozoneManager.getScmClient(), snapshotMetadataManager, conf, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index b6bd57ff6f51..b8133e5844f9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -839,7 +839,7 @@ private void instantiateServices(boolean withNewSnapshot) throws IOException { delegationTokenMgr = createDelegationTokenSecretManager(configuration); } - prefixManager = new PrefixManagerImpl(metadataManager, isRatisEnabled); + prefixManager = new PrefixManagerImpl(this, metadataManager, isRatisEnabled); keyManager = new KeyManagerImpl(this, scmClient, configuration, perfMetrics); accessAuthorizer = OzoneAuthorizerFactory.forOM(this); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index d801d1dbf331..097f354bbbc1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -18,6 +18,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -40,6 +42,7 @@ import 
java.util.stream.Collectors; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK; @@ -53,6 +56,7 @@ public class PrefixManagerImpl implements PrefixManager { LoggerFactory.getLogger(PrefixManagerImpl.class); private static final List EMPTY_ACL_LIST = new ArrayList<>(); + private final OzoneManager ozoneManager; private final OMMetadataManager metadataManager; // In-memory prefix tree to optimize ACL evaluation @@ -62,9 +66,10 @@ public class PrefixManagerImpl implements PrefixManager { // where we integrate both HA and Non-HA code. private boolean isRatisEnabled; - public PrefixManagerImpl(OMMetadataManager metadataManager, + public PrefixManagerImpl(OzoneManager ozoneManager, OMMetadataManager metadataManager, boolean isRatisEnabled) { this.isRatisEnabled = isRatisEnabled; + this.ozoneManager = ozoneManager; this.metadataManager = metadataManager; loadPrefixTree(); } @@ -90,16 +95,11 @@ public OMMetadataManager getMetadataManager() { return metadataManager; } - /** - * Returns list of ACLs for given Ozone object. - * - * @param obj Ozone object. - * @throws IOException if there is error. - */ @Override public List getAcl(OzoneObj obj) throws IOException { validateOzoneObj(obj); - String prefixPath = obj.getPath(); + OzoneObj resolvedObj = getResolvedPrefixObj(obj); + String prefixPath = resolvedObj.getPath(); metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); try { String longestPrefix = prefixTree.getLongestPrefix(prefixPath); @@ -149,7 +149,14 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) Objects.requireNonNull(ozObject); Objects.requireNonNull(context); - String prefixPath = ozObject.getPath(); + OzoneObj resolvedObj; + try { + resolvedObj = getResolvedPrefixObj(ozObject); + } catch (IOException e) { + throw new OMException("Failed to resolveBucketLink:", e, INTERNAL_ERROR); + } + + String prefixPath = resolvedObj.getPath(); metadataManager.getLock().acquireReadLock(PREFIX_LOCK, prefixPath); try { String longestPrefix = prefixTree.getLongestPrefix(prefixPath); @@ -312,6 +319,7 @@ private void inheritParentAcl(OzoneObj ozoneObj, OmPrefixInfo prefixInfo) public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, OmPrefixInfo prefixInfo, long transactionLogIndex) throws IOException { + boolean newPrefix = false; if (prefixInfo == null) { OmPrefixInfo.Builder prefixInfoBuilder = new OmPrefixInfo.Builder() @@ -322,10 +330,13 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, prefixInfoBuilder.setUpdateID(transactionLogIndex); } prefixInfo = prefixInfoBuilder.build(); + newPrefix = true; } boolean changed = prefixInfo.setAcls(ozoneAcls); - inheritParentAcl(ozoneObj, prefixInfo); + if (newPrefix) { + inheritParentAcl(ozoneObj, prefixInfo); + } prefixTree.insert(ozoneObj.getPath(), prefixInfo); if (!isRatisEnabled) { metadataManager.getPrefixTable().put(ozoneObj.getPath(), prefixInfo); @@ -333,12 +344,31 @@ public OMPrefixAclOpResult setAcl(OzoneObj ozoneObj, List ozoneAcls, return new OMPrefixAclOpResult(prefixInfo, changed); } + /** + * Get the resolved prefix object to handle prefix that is under a link bucket. 
+ * @param obj prefix object + * @return the resolved prefix object if the object belongs under a link bucket. + * Otherwise, return the same prefix object. + * @throws IOException Exception thrown when resolving the bucket link. + */ + public OzoneObj getResolvedPrefixObj(OzoneObj obj) throws IOException { + if (StringUtils.isEmpty(obj.getVolumeName()) || StringUtils.isEmpty(obj.getBucketName())) { + return obj; + } + + ResolvedBucket resolvedBucket = ozoneManager.resolveBucketLink( + Pair.of(obj.getVolumeName(), obj.getBucketName())); + return resolvedBucket.update(obj); + } + /** * Result of the prefix acl operation. */ public static class OMPrefixAclOpResult { - private OmPrefixInfo omPrefixInfo; - private boolean operationsResult; + /** The updated prefix info after applying the prefix acl operation. */ + private final OmPrefixInfo omPrefixInfo; + /** Operation result, success if the underlying ACL is changed, false otherwise. */ + private final boolean operationsResult; public OMPrefixAclOpResult(OmPrefixInfo omPrefixInfo, boolean operationsResult) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java index 9c304ac2f1cc..af1db8bad368 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ResolvedBucket.java @@ -23,6 +23,8 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import java.util.LinkedHashMap; import java.util.Map; @@ -120,6 +122,15 @@ public KeyArgs update(KeyArgs args) { : args; } + public OzoneObj update(OzoneObj ozoneObj) { + return isLink() + ? 
OzoneObjInfo.Builder.fromOzoneObj(ozoneObj) + .setVolumeName(realVolume()) + .setBucketName(realBucket()) + .build() + : ozoneObj; + } + public boolean isLink() { return !Objects.equals(requestedVolume, realVolume) || !Objects.equals(requestedBucket, realBucket); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 345886c050b5..a8490b111524 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -33,9 +33,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.util.ObjectParser; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -66,9 +64,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); boolean lockAcquired = false; - String volume = null; - String bucket = null; - String key = null; + String prefixPath = null; + OzoneObj resolvedPrefixObj = null; OMPrefixAclOpResult operationResult = null; boolean opResult = false; Result result = null; @@ -76,20 +73,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn PrefixManagerImpl prefixManager = (PrefixManagerImpl) ozoneManager.getPrefixManager(); try { + resolvedPrefixObj = prefixManager.getResolvedPrefixObj(getOzoneObj()); prefixManager.validateOzoneObj(getOzoneObj()); - String prefixPath = getOzoneObj().getPath(); - validatePrefixPath(prefixPath); - ObjectParser objectParser = new ObjectParser(prefixPath, - OzoneManagerProtocolProtos.OzoneObj.ObjectType.PREFIX); - volume = objectParser.getVolume(); - bucket = objectParser.getBucket(); - key = objectParser.getKey(); + validatePrefixPath(resolvedPrefixObj.getPath()); + prefixPath = resolvedPrefixObj.getPath(); // check Acl if (ozoneManager.getAclsEnabled()) { checkAcls(ozoneManager, OzoneObj.ResourceType.PREFIX, OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, - volume, bucket, key); + resolvedPrefixObj.getVolumeName(), resolvedPrefixObj.getBucketName(), + resolvedPrefixObj.getPrefixName()); } mergeOmLockDetails(omMetadataManager.getLock() @@ -102,7 +96,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } try { - operationResult = apply(prefixManager, omPrefixInfo, trxnLogIndex); + operationResult = apply(resolvedPrefixObj, prefixManager, omPrefixInfo, trxnLogIndex); } catch (IOException ex) { // In HA case this will never happen. 
// As in add/remove/setAcl method we have logic to update database, @@ -145,16 +139,21 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } finally { if (lockAcquired) { mergeOmLockDetails(omMetadataManager.getLock() - .releaseWriteLock(PREFIX_LOCK, getOzoneObj().getPath())); + .releaseWriteLock(PREFIX_LOCK, prefixPath)); } if (omClientResponse != null) { omClientResponse.setOmLockDetails(getOmLockDetails()); } } - OzoneObj obj = getOzoneObj(); + OzoneObj obj = resolvedPrefixObj; + if (obj == null) { + // Fall back to the prefix under link bucket + obj = getOzoneObj(); + } + Map auditMap = obj.toAuditMap(); - onComplete(opResult, exception, ozoneManager.getMetrics(), result, + onComplete(obj, opResult, exception, ozoneManager.getMetrics(), result, trxnLogIndex, ozoneManager.getAuditLogger(), auditMap); return omClientResponse; @@ -168,24 +167,26 @@ private void validatePrefixPath(String prefixPath) throws OMException { } /** - * Get the path name from the request. - * @return path name + * Get the prefix ozone object passed in the request. + * Note: The ozone object might still refer to a prefix under a link bucket which + * might require to be resolved. + * @return Prefix ozone object. */ abstract OzoneObj getOzoneObj(); // TODO: Finer grain metrics can be moved to these callbacks. They can also // be abstracted into separate interfaces in future. /** - * Get the initial om response builder with lock. - * @return om response builder. + * Get the initial OM response builder with lock. + * @return OM response builder. */ abstract OMResponse.Builder onInit(); /** - * Get the om client response on success case with lock. - * @param omResponse - * @param omPrefixInfo - * @param operationResult + * Get the OM client response on success case with lock. + * @param omResponse OM response builder. + * @param omPrefixInfo The updated prefix info. + * @param operationResult The operation result. See {@link OMPrefixAclOpResult}. * @return OMClientResponse */ abstract OMClientResponse onSuccess( @@ -194,8 +195,8 @@ abstract OMClientResponse onSuccess( /** * Get the om client response on failure case with lock. - * @param omResponse - * @param exception + * @param omResponse OM response builder. + * @param exception Exception thrown while processing the request. * @return OMClientResponse */ abstract OMClientResponse onFailure(OMResponse.Builder omResponse, @@ -204,23 +205,28 @@ abstract OMClientResponse onFailure(OMResponse.Builder omResponse, /** * Completion hook for final processing before return without lock. * Usually used for logging without lock and metric update. - * @param operationResult - * @param exception - * @param omMetrics + * @param resolvedOzoneObj Resolved prefix object in case the prefix is under a link bucket. + * The original ozone object if the prefix is not under a link bucket. + * @param operationResult The operation result. See {@link OMPrefixAclOpResult}. + * @param exception Exception thrown while processing the request. + * @param omMetrics OM metrics used to update the relevant metrics. 
*/ - abstract void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, - AuditLogger auditLogger, Map auditMap); + @SuppressWarnings("checkstyle:ParameterNumber") + abstract void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, + AuditLogger auditLogger, Map auditMap); /** - * Apply the acl operation, if successfully completed returns true, - * else false. - * @param prefixManager - * @param omPrefixInfo - * @param trxnLogIndex - * @throws IOException + * Apply the acl operation to underlying storage (prefix tree and table cache). + * @param resolvedOzoneObj Resolved prefix object in case the prefix is under a link bucket. + * The original ozone object if the prefix is not under a link bucket. + * @param prefixManager Prefix manager used to update the underlying prefix storage. + * @param omPrefixInfo Previous prefix info, null if there is no existing prefix info. + * @param trxnLogIndex Transaction log index. + * @return result of the prefix operation, see {@link OMPrefixAclOpResult}. + * @throws IOException Exception thrown when updating the underlying prefix storage. */ - abstract OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + abstract OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java index fe75928795b6..c290b08939c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAddAclRequest.java @@ -19,10 +19,8 @@ package org.apache.hadoop.ozone.om.request.key.acl.prefix; import java.io.IOException; -import java.util.List; import java.util.Map; -import com.google.common.collect.Lists; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -55,8 +53,8 @@ public class OMPrefixAddAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixAddAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final OzoneAcl ozoneAcl; public OMPrefixAddAclRequest(OMRequest omRequest) { super(omRequest); @@ -65,8 +63,7 @@ public OMPrefixAddAclRequest(OMRequest omRequest) { // TODO: conversion of OzoneObj to protobuf can be avoided when we have // single code path for HA and Non-HA ozoneObj = OzoneObjInfo.fromProtobuf(addAclRequest.getObj()); - ozoneAcls = Lists.newArrayList( - OzoneAcl.fromProtobuf(addAclRequest.getAcl())); + ozoneAcl = OzoneAcl.fromProtobuf(addAclRequest.getAcl()); } @Override @@ -96,41 +93,41 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { if (operationResult) { - 
LOG.debug("Add acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + LOG.debug("Add acl: {} to path: {} success!", ozoneAcl, + resolvedOzoneObj.getPath()); } else { LOG.debug("Acl {} already exists in path {}", - ozoneAcls, ozoneObj.getPath()); + ozoneAcl, resolvedOzoneObj.getPath()); } } break; case FAILURE: - LOG.error("Add acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + LOG.error("Add acl {} to path {} failed!", ozoneAcl, + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixAddAclRequest: {}", getOmRequest()); } - if (ozoneAcls != null) { - auditMap.put(OzoneConsts.ACL, ozoneAcls.toString()); + if (ozoneAcl != null) { + auditMap.put(OzoneConsts.ACL, ozoneAcl.toString()); } auditLog(auditLogger, buildAuditMessage(OMAction.ADD_ACL, auditMap, exception, getOmRequest().getUserInfo())); } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.addAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo, + return prefixManager.addAcl(resolvedOzoneObj, ozoneAcl, omPrefixInfo, trxnLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java index 67b704121676..7c2666944c57 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixRemoveAclRequest.java @@ -45,15 +45,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; /** - * Handle add Acl request for prefix. + * Handle remove Acl request for prefix. 
*/ public class OMPrefixRemoveAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixRemoveAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final List ozoneAcls; public OMPrefixRemoveAclRequest(OMRequest omRequest) { super(omRequest); @@ -93,25 +93,24 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { if (operationResult) { LOG.debug("Remove acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + resolvedOzoneObj.getPath()); } else { LOG.debug("Acl {} not removed from path {} as it does not exist", - ozoneAcls, ozoneObj.getPath()); + ozoneAcls, resolvedOzoneObj.getPath()); } } break; case FAILURE: - omMetrics.incNumBucketUpdateFails(); LOG.error("Remove acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixRemoveAclRequest: {}", @@ -126,9 +125,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.removeAcl(ozoneObj, ozoneAcls.get(0), omPrefixInfo); + return prefixManager.removeAcl(resolvedOzoneObj, ozoneAcls.get(0), omPrefixInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java index 6e93e8ffe5e0..11fc0d150eea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixSetAclRequest.java @@ -45,15 +45,15 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetAclResponse; /** - * Handle add Acl request for prefix. + * Handle set Acl request for prefix. 
*/ public class OMPrefixSetAclRequest extends OMPrefixAclRequest { private static final Logger LOG = LoggerFactory.getLogger(OMPrefixSetAclRequest.class); - private OzoneObj ozoneObj; - private List ozoneAcls; + private final OzoneObj ozoneObj; + private final List ozoneAcls; public OMPrefixSetAclRequest(OMRequest omRequest) { super(omRequest); @@ -94,20 +94,19 @@ OMClientResponse onFailure(OMResponse.Builder omResponse, } @Override - void onComplete(boolean operationResult, Exception exception, - OMMetrics omMetrics, Result result, long trxnLogIndex, - AuditLogger auditLogger, Map auditMap) { + void onComplete(OzoneObj resolvedOzoneObj, boolean operationResult, + Exception exception, OMMetrics omMetrics, Result result, + long trxnLogIndex, AuditLogger auditLogger, Map auditMap) { switch (result) { case SUCCESS: if (LOG.isDebugEnabled()) { LOG.debug("Set acl: {} to path: {} success!", ozoneAcls, - ozoneObj.getPath()); + resolvedOzoneObj.getPath()); } break; case FAILURE: - omMetrics.incNumBucketUpdateFails(); LOG.error("Set acl {} to path {} failed!", ozoneAcls, - ozoneObj.getPath(), exception); + resolvedOzoneObj.getPath(), exception); break; default: LOG.error("Unrecognized Result for OMPrefixSetAclRequest: {}", @@ -122,9 +121,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - OMPrefixAclOpResult apply(PrefixManagerImpl prefixManager, + OMPrefixAclOpResult apply(OzoneObj resolvedOzoneObj, PrefixManagerImpl prefixManager, OmPrefixInfo omPrefixInfo, long trxnLogIndex) throws IOException { - return prefixManager.setAcl(ozoneObj, ozoneAcls, omPrefixInfo, + return prefixManager.setAcl(resolvedOzoneObj, ozoneAcls, omPrefixInfo, trxnLogIndex); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index f61e947d2b1e..83b491984ed2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -426,7 +426,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound( @MethodSource("data") public void testValidateAndUpdateCacheWithInvalidPath( boolean setKeyPathLock, boolean setFileSystemPaths) throws Exception { - PrefixManager prefixManager = new PrefixManagerImpl( + PrefixManager prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); when(ozoneManager.getOzoneLockProvider()).thenReturn( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index 9c5a9257245f..b2d495a423ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -50,7 +50,7 @@ public class TestOMPrefixAclRequest extends TestOMKeyRequest { @Test public void testAddAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); 
when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; @@ -116,7 +116,7 @@ public void testAddAclRequest() throws Exception { @Test public void testValidationFailure() { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); @@ -143,7 +143,7 @@ public void testValidationFailure() { @Test public void testRemoveAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; @@ -223,7 +223,7 @@ public void testRemoveAclRequest() throws Exception { @Test public void testSetAclRequest() throws Exception { - PrefixManagerImpl prefixManager = new PrefixManagerImpl( + PrefixManagerImpl prefixManager = new PrefixManagerImpl(ozoneManager, ozoneManager.getMetadataManager(), true); when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); String prefixName = UUID.randomUUID() + OZONE_URI_DELIMITER; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java index b12087785b1f..543266b51aeb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/acl/prefix/TestOMPrefixAclResponse.java @@ -17,8 +17,12 @@ package org.apache.hadoop.ozone.om.response.key.acl.prefix; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; +import org.apache.hadoop.ozone.om.ResolvedBucket; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; import org.apache.hadoop.ozone.om.response.key.TestOMKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -36,6 +40,8 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; /** * Tests TestOMPrefixAclResponse. 
@@ -77,13 +83,22 @@ public void testAddToDBBatch() throws Exception { .getSkipCache(prefixName); assertEquals(omPrefixInfo, persistedPrefixInfo); + String volumeName = "vol"; + String bucketName = "buck"; + + OzoneManager ozoneManager = mock(OzoneManager.class); + when(ozoneManager.resolveBucketLink(Pair.of(volumeName, bucketName))) + .thenReturn(new ResolvedBucket(volumeName, bucketName, volumeName, + bucketName, "", BucketLayout.DEFAULT)); + + // Verify that in-memory Prefix Tree (Radix Tree) is able to reload from // DB successfully PrefixManagerImpl prefixManager = - new PrefixManagerImpl(omMetadataManager, true); + new PrefixManagerImpl(ozoneManager, omMetadataManager, true); OzoneObj prefixObj = OzoneObjInfo.Builder.newBuilder() - .setVolumeName("vol") - .setBucketName("buck") + .setVolumeName(volumeName) + .setBucketName(bucketName) .setPrefixName("prefix/") .setResType(OzoneObj.ResourceType.PREFIX) .setStoreType(OzoneObj.StoreType.OZONE) @@ -123,7 +138,7 @@ public void testAddToDBBatch() throws Exception { // Reload prefix tree from DB and validate again. prefixManager = - new PrefixManagerImpl(omMetadataManager, true); + new PrefixManagerImpl(ozoneManager, omMetadataManager, true); prefixInfo = prefixManager.getPrefixInfo(prefixObj); assertEquals(2L, prefixInfo.getUpdateID()); From f16b1affebf6effac727ea0b1e8b9bcd338f73ff Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Tue, 5 Mar 2024 18:06:37 +0530 Subject: [PATCH 099/108] HDDS-10424. Improve error message for prefix without trailing slash (#6328) --- .../org/apache/hadoop/ozone/om/TestKeyManagerImpl.java | 8 ++++---- .../org/apache/hadoop/ozone/om/PrefixManagerImpl.java | 5 +++-- .../ozone/om/request/key/TestOMPrefixAclRequest.java | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 67b09a1434a7..9a5ded71484b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -641,7 +641,7 @@ public void testInvalidPrefixAcl() throws IOException { // add acl with invalid prefix name Exception ex = assertThrows(OMException.class, () -> writeClient.addAcl(ozInvalidPrefix, ozAcl1)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); OzoneObj ozPrefix1 = new OzoneObjInfo.Builder() .setVolumeName(volumeName) @@ -659,7 +659,7 @@ public void testInvalidPrefixAcl() throws IOException { // get acl with invalid prefix name ex = assertThrows(OMException.class, () -> writeClient.getAcl(ozInvalidPrefix)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); // set acl with invalid prefix name List ozoneAcls = new ArrayList(); @@ -667,12 +667,12 @@ public void testInvalidPrefixAcl() throws IOException { ex = assertThrows(OMException.class, () -> writeClient.setAcl(ozInvalidPrefix, ozoneAcls)); - assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); // remove acl with invalid prefix name ex = assertThrows(OMException.class, () -> writeClient.removeAcl(ozInvalidPrefix, ozAcl1)); - 
assertTrue(ex.getMessage().startsWith("Invalid prefix name")); + assertTrue(ex.getMessage().startsWith("Missing trailing slash")); } @Test diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index 097f354bbbc1..47b44f7d759a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -45,6 +45,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.PREFIX_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; @@ -225,8 +226,8 @@ public void validateOzoneObj(OzoneObj obj) throws OMException { throw new OMException("Prefix name is required.", PREFIX_NOT_FOUND); } if (!prefixName.endsWith("/")) { - throw new OMException("Invalid prefix name: " + prefixName, - PREFIX_NOT_FOUND); + throw new OMException("Missing trailing slash '/' in prefix name: " + prefixName, + INVALID_PATH_IN_ACL_REQUEST); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index b2d495a423ec..8671ff107131 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -128,7 +128,7 @@ public void testValidationFailure() { ); OMClientResponse response1 = invalidRequest1.validateAndUpdateCache(ozoneManager, 1); - assertEquals(OzoneManagerProtocolProtos.Status.PREFIX_NOT_FOUND, + assertEquals(OzoneManagerProtocolProtos.Status.INVALID_PATH_IN_ACL_REQUEST, response1.getOMResponse().getStatus()); // Not a valid FS path From 9a6ece27e7907f06a48ceb55406c2e196977db4c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 5 Mar 2024 19:26:36 +0100 Subject: [PATCH 100/108] HDDS-10457. 
Remove dependency commons-pool2 (#6317) --- hadoop-hdds/common/pom.xml | 4 ---- hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 1 - hadoop-ozone/dist/src/main/license/jar-report.txt | 1 - hadoop-ozone/ozonefs-shaded/pom.xml | 1 - pom.xml | 6 ------ 5 files changed, 13 deletions(-) diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 807ddf7f2765..9189122deebe 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -135,10 +135,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.commons - commons-pool2 - org.bouncycastle bcpkix-jdk18on diff --git a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt index 97942b2ae43f..a705411438d1 100644 --- a/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt +++ b/hadoop-ozone/dist/src/main/license/bin/LICENSE.txt @@ -360,7 +360,6 @@ Apache License 2.0 org.apache.commons:commons-compress org.apache.commons:commons-configuration2 org.apache.commons:commons-lang3 - org.apache.commons:commons-pool2 org.apache.commons:commons-text org.apache.curator:curator-client org.apache.curator:curator-framework diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt index ce2d4136b113..70fecc866da3 100644 --- a/hadoop-ozone/dist/src/main/license/jar-report.txt +++ b/hadoop-ozone/dist/src/main/license/jar-report.txt @@ -28,7 +28,6 @@ share/ozone/lib/commons-lang3.jar share/ozone/lib/commons-lang.jar share/ozone/lib/commons-logging.jar share/ozone/lib/commons-net.jar -share/ozone/lib/commons-pool2.jar share/ozone/lib/commons-text.jar share/ozone/lib/commons-validator.jar share/ozone/lib/commons-fileupload.jar diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index 6ff4e3c701a7..402c30ab2d39 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -128,7 +128,6 @@ org.apache.commons.digester.**.* org.apache.commons.io.**.* org.apache.commons.logging.**.* - org.apache.commons.pool2.**.* org.apache.commons.validator.**.* org.apache.commons.lang3.**.* org.sqlite.**.* diff --git a/pom.xml b/pom.xml index 898b675893a3..2689a3ccf81d 100644 --- a/pom.xml +++ b/pom.xml @@ -126,7 +126,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.1 3.6.1 3.10.0 - 2.6.0 1.11.0 1.6 1.5 @@ -791,11 +790,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs commons-net ${commons-net.version} - - org.apache.commons - commons-pool2 - ${commons-pool2.version} - commons-validator commons-validator From 4243721dda6919e7614f8d1258027e2aec55bc28 Mon Sep 17 00:00:00 2001 From: Ethan Rose <33912936+errose28@users.noreply.github.com> Date: Tue, 5 Mar 2024 13:58:59 -0800 Subject: [PATCH 101/108] HDDS-10450. Add GitHub actions labeler for the reconciliation feature branch. (#6310) --- .github/labeler.yml | 21 +++++++++++++++++++++ .github/workflows/label-pr.yml | 29 +++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 .github/labeler.yml create mode 100644 .github/workflows/label-pr.yml diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 000000000000..fc68079617a6 --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,21 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Configuration for .github/workflows/label-pr.yml + +# This rule can be deleted once the container reconciliation feature branch is merged. +container-reconciliation: +- base-branch: HDDS-10239-container-reconciliation + diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml new file mode 100644 index 000000000000..abc620b7ef09 --- /dev/null +++ b/.github/workflows/label-pr.yml @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This workflow reads its configuration from the .github/labeler.yml file. +name: pull-request-labeler +on: +- pull_request_target + +jobs: + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 + From 87d8d6155ca1d68773061017385c11fcfde43214 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 6 Mar 2024 13:13:08 +0100 Subject: [PATCH 102/108] HDDS-10467. 
Reduce metrics visibility (#6332) --- .../hdds/scm/ContainerClientMetrics.java | 15 ++++------ .../ozone/grpc/metrics/GrpcMetrics.java | 4 +-- .../transport/server/ratis/CSMMetrics.java | 15 +--------- .../replication/MeasuredReplicator.java | 22 +++++--------- .../server/events/EventWatcherMetrics.java | 14 +++------ .../hdds/scm/safemode/SafeModeMetrics.java | 12 ++++---- .../scm/security/RootCARotationMetrics.java | 2 +- .../recon/TestReconWithOzoneManager.java | 10 +++---- .../hadoop/ozone/om/OMPerformanceMetrics.java | 26 ++++++++-------- .../om/ratis/OzoneManagerDoubleBuffer.java | 6 ---- .../OzoneManagerDoubleBufferMetrics.java | 10 ++----- .../ozone/om/ratis/metrics/package-info.java | 21 ------------- .../ratis/TestOzoneManagerDoubleBuffer.java | 1 - ...eManagerDoubleBufferWithDummyResponse.java | 1 - .../metrics/OzoneManagerSyncMetrics.java | 30 +++++++++---------- .../TestOzoneManagerServiceProviderImpl.java | 16 +++++----- 16 files changed, 70 insertions(+), 135 deletions(-) rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/{metrics => }/OzoneManagerDoubleBufferMetrics.java (95%) delete mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java index 1045f7a6a172..d51dfa416313 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/ContainerClientMetrics.java @@ -111,28 +111,23 @@ public void recordWriteChunk(Pipeline pipeline, long chunkSizeBytes) { totalWriteChunkBytes.incr(chunkSizeBytes); } - @VisibleForTesting - public MutableCounterLong getTotalWriteChunkBytes() { + MutableCounterLong getTotalWriteChunkBytes() { return totalWriteChunkBytes; } - @VisibleForTesting - public MutableCounterLong getTotalWriteChunkCalls() { + MutableCounterLong getTotalWriteChunkCalls() { return totalWriteChunkCalls; } - @VisibleForTesting - public Map getWriteChunkBytesByPipeline() { + Map getWriteChunkBytesByPipeline() { return writeChunkBytesByPipeline; } - @VisibleForTesting - public Map getWriteChunkCallsByPipeline() { + Map getWriteChunkCallsByPipeline() { return writeChunkCallsByPipeline; } - @VisibleForTesting - public Map getWriteChunksCallsByLeaders() { + Map getWriteChunksCallsByLeaders() { return writeChunksCallsByLeaders; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java index 23c3dbaf1520..6bd83b44a93f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/grpc/metrics/GrpcMetrics.java @@ -200,11 +200,11 @@ public long getUnknownMessagesReceived() { return unknownMessagesReceived.value(); } - public MutableRate getGrpcQueueTime() { + MutableRate getGrpcQueueTime() { return grpcQueueTime; } - public MutableRate getGrpcProcessingTime() { + MutableRate getGrpcProcessingTime() { return grpcProcessingTime; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java index b776dc903de4..87572768e4af 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.container.common.transport.server.ratis; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.metrics2.MetricsSystem; @@ -132,67 +131,55 @@ public void incNumApplyTransactionsFails() { numApplyTransactionFails.incr(); } - @VisibleForTesting public long getNumWriteStateMachineOps() { return numWriteStateMachineOps.value(); } - @VisibleForTesting public long getNumQueryStateMachineOps() { return numQueryStateMachineOps.value(); } - @VisibleForTesting public long getNumApplyTransactionsOps() { return numApplyTransactionOps.value(); } - @VisibleForTesting public long getNumWriteStateMachineFails() { return numWriteStateMachineFails.value(); } - @VisibleForTesting public long getNumWriteDataFails() { return numWriteDataFails.value(); } - @VisibleForTesting public long getNumQueryStateMachineFails() { return numQueryStateMachineFails.value(); } - @VisibleForTesting public long getNumApplyTransactionsFails() { return numApplyTransactionFails.value(); } - @VisibleForTesting public long getNumReadStateMachineFails() { return numReadStateMachineFails.value(); } - @VisibleForTesting public long getNumReadStateMachineMissCount() { return numReadStateMachineMissCount.value(); } - @VisibleForTesting public long getNumReadStateMachineOps() { return numReadStateMachineOps.value(); } - @VisibleForTesting public long getNumBytesWrittenCount() { return numBytesWrittenCount.value(); } - @VisibleForTesting public long getNumBytesCommittedCount() { return numBytesCommittedCount.value(); } - public MutableRate getApplyTransactionLatencyNs() { + MutableRate getApplyTransactionLatencyNs() { return applyTransactionNs; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java index fa3763d88067..7becbe752189 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/MeasuredReplicator.java @@ -27,7 +27,6 @@ import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status; -import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.util.Time; /** @@ -98,38 +97,31 @@ public void close() throws Exception { DefaultMetricsSystem.instance().unregisterSource(metricsName()); } - @VisibleForTesting - public MutableCounterLong getSuccess() { + MutableCounterLong getSuccess() { return success; } - @VisibleForTesting - public MutableGaugeLong getSuccessTime() { + MutableGaugeLong getSuccessTime() { return successTime; } - @VisibleForTesting - public MutableGaugeLong getFailureTime() { + MutableGaugeLong getFailureTime() { return failureTime; } - 
@VisibleForTesting - public MutableCounterLong getFailure() { + MutableCounterLong getFailure() { return failure; } - @VisibleForTesting - public MutableGaugeLong getQueueTime() { + MutableGaugeLong getQueueTime() { return queueTime; } - @VisibleForTesting - public MutableGaugeLong getTransferredBytes() { + MutableGaugeLong getTransferredBytes() { return transferredBytes; } - @VisibleForTesting - public MutableGaugeLong getFailureBytes() { + MutableGaugeLong getFailureBytes() { return failureBytes; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java index dc217476a60c..047386730818 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventWatcherMetrics.java @@ -21,8 +21,6 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableRate; -import com.google.common.annotations.VisibleForTesting; - /** * Metrics for any event watcher. */ @@ -56,23 +54,19 @@ public void updateFinishingTime(long duration) { completionTime.add(duration); } - @VisibleForTesting - public MutableCounterLong getTrackedEvents() { + MutableCounterLong getTrackedEvents() { return trackedEvents; } - @VisibleForTesting - public MutableCounterLong getTimedOutEvents() { + MutableCounterLong getTimedOutEvents() { return timedOutEvents; } - @VisibleForTesting - public MutableCounterLong getCompletedEvents() { + MutableCounterLong getCompletedEvents() { return completedEvents; } - @VisibleForTesting - public MutableRate getCompletionTime() { + MutableRate getCompletionTime() { return completionTime; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java index 80b8257c40b2..e1a7d2dca531 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeMetrics.java @@ -78,28 +78,28 @@ public void incCurrentContainersWithOneReplicaReportedCount() { this.currentContainersWithOneReplicaReportedCount.incr(); } - public MutableCounterLong getNumHealthyPipelinesThreshold() { + MutableCounterLong getNumHealthyPipelinesThreshold() { return numHealthyPipelinesThreshold; } - public MutableCounterLong getCurrentHealthyPipelinesCount() { + MutableCounterLong getCurrentHealthyPipelinesCount() { return currentHealthyPipelinesCount; } - public MutableCounterLong + MutableCounterLong getNumPipelinesWithAtleastOneReplicaReportedThreshold() { return numPipelinesWithAtleastOneReplicaReportedThreshold; } - public MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { + MutableCounterLong getCurrentPipelinesWithAtleastOneReplicaCount() { return currentPipelinesWithAtleastOneReplicaReportedCount; } - public MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { + MutableCounterLong getNumContainerWithOneReplicaReportedThreshold() { return numContainerWithOneReplicaReportedThreshold; } - public MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { + MutableCounterLong getCurrentContainersWithOneReplicaReportedCount() { return currentContainersWithOneReplicaReportedCount; } diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java index fcd52d0ebd76..1c1a1c624502 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationMetrics.java @@ -67,7 +67,7 @@ private RootCARotationMetrics(MetricsSystem ms) { this.ms = ms; } - public MutableGaugeLong getSuccessTimeInNs() { + MutableGaugeLong getSuccessTimeInNs() { return successTimeInNs; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 9589b1c40056..c0e5acc20e7a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -202,7 +202,7 @@ public void testOmDBSyncing() throws Exception { // verify sequence number after full snapshot assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); //add 4 keys to check for delta updates addKeys(1, 5); @@ -220,7 +220,7 @@ public void testOmDBSyncing() throws Exception { //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); long beforeRestartSnapShotTimeStamp = getReconTaskAttributeFromJson( taskStatusResponse, @@ -260,7 +260,7 @@ public void testOmDBSyncing() throws Exception { //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); } // This test simulates the mis-match in sequence number between Recon OM @@ -314,7 +314,7 @@ public void testOmDBSyncWithSeqNumberMismatch() throws Exception { // verify sequence number after incremental delta snapshot assertEquals(omLatestSeqNumber, reconLatestSeqNumber); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); String volume = "vol15"; String bucket = "bucket15"; @@ -356,7 +356,7 @@ public void testOmDBSyncWithSeqNumberMismatch() throws Exception { reconLatestSeqNumber = ((RDBStore) reconMetadataManagerInstance.getStore()).getDb() .getLatestSequenceNumber(); - assertEquals(0, metrics.getSequenceNumberLag().value()); + assertEquals(0, metrics.getSequenceNumberLag()); assertEquals(omLatestSeqNumber, reconLatestSeqNumber); reconLatestSeqNumber = ((RDBStore) reconMetadataManagerInstance.getStore()).getDb() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index d118e2f4ecc9..f2f11025158d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -123,20 +123,20 @@ public void addLookupLatency(long latencyInNs) { 
lookupLatencyNs.add(latencyInNs); } - public MutableRate getLookupRefreshLocationLatencyNs() { + MutableRate getLookupRefreshLocationLatencyNs() { return lookupRefreshLocationLatencyNs; } - public MutableRate getLookupGenerateBlockTokenLatencyNs() { + MutableRate getLookupGenerateBlockTokenLatencyNs() { return lookupGenerateBlockTokenLatencyNs; } - public MutableRate getLookupReadKeyInfoLatencyNs() { + MutableRate getLookupReadKeyInfoLatencyNs() { return lookupReadKeyInfoLatencyNs; } - public MutableRate getLookupAclCheckLatencyNs() { + MutableRate getLookupAclCheckLatencyNs() { return lookupAclCheckLatencyNs; } @@ -144,7 +144,7 @@ public void addS3VolumeContextLatencyNs(long latencyInNs) { s3VolumeContextLatencyNs.add(latencyInNs); } - public MutableRate getLookupResolveBucketLatencyNs() { + MutableRate getLookupResolveBucketLatencyNs() { return lookupResolveBucketLatencyNs; } @@ -152,27 +152,27 @@ public void addGetKeyInfoLatencyNs(long value) { getKeyInfoLatencyNs.add(value); } - public MutableRate getGetKeyInfoAclCheckLatencyNs() { + MutableRate getGetKeyInfoAclCheckLatencyNs() { return getKeyInfoAclCheckLatencyNs; } - public MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { + MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { return getKeyInfoGenerateBlockTokenLatencyNs; } - public MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { + MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { return getKeyInfoReadKeyInfoLatencyNs; } - public MutableRate getGetKeyInfoRefreshLocationLatencyNs() { + MutableRate getGetKeyInfoRefreshLocationLatencyNs() { return getKeyInfoRefreshLocationLatencyNs; } - public MutableRate getGetKeyInfoResolveBucketLatencyNs() { + MutableRate getGetKeyInfoResolveBucketLatencyNs() { return getKeyInfoResolveBucketLatencyNs; } - public MutableRate getGetKeyInfoSortDatanodesLatencyNs() { + MutableRate getGetKeyInfoSortDatanodesLatencyNs() { return getKeyInfoSortDatanodesLatencyNs; } @@ -216,11 +216,11 @@ public MutableRate getValidateAndUpdateCacheLatencyNs() { return validateAndUpdateCacheLatencyNs; } - public MutableRate getListKeysAclCheckLatencyNs() { + MutableRate getListKeysAclCheckLatencyNs() { return listKeysAclCheckLatencyNs; } - public MutableRate getListKeysResolveBucketLatencyNs() { + MutableRate getListKeysResolveBucketLatencyNs() { return listKeysResolveBucketLatencyNs; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index d3db4120e61b..857005bd9292 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -44,7 +44,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.codec.OMDBDefinition; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -595,29 +594,24 @@ private synchronized void swapCurrentAndReadyBuffer() { readyBuffer = temp; } - @VisibleForTesting OzoneManagerDoubleBufferMetrics getMetrics() { return metrics; } /** @return the flushed transaction count to OM DB. 
*/ - @VisibleForTesting long getFlushedTransactionCountForTesting() { return flushedTransactionCount.get(); } /** @return total number of flush iterations run by sync thread. */ - @VisibleForTesting long getFlushIterationsForTesting() { return flushIterations.get(); } - @VisibleForTesting int getCurrentBufferSize() { return currentBuffer.size(); } - @VisibleForTesting int getReadyBufferSize() { return readyBuffer.size(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java similarity index 95% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java index 351f18528931..afa162cc3ad8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/OzoneManagerDoubleBufferMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBufferMetrics.java @@ -16,9 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om.ratis.metrics; - -import com.google.common.annotations.VisibleForTesting; +package org.apache.hadoop.ozone.om.ratis; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; @@ -114,8 +112,7 @@ public void updateFlushTime(long time) { flushTime.add(time); } - @VisibleForTesting - public MutableRate getFlushTime() { + MutableRate getFlushTime() { return flushTime; } @@ -142,8 +139,7 @@ public void updateFlush(int flushedTransactionsInOneIteration) { updateQueueSize(flushedTransactionsInOneIteration); } - @VisibleForTesting - public MutableStat getQueueSize() { + MutableStat getQueueSize() { return queueSize; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java deleted file mode 100644 index e41c645b581a..000000000000 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/metrics/package-info.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -/** - * package which contains metrics classes. - */ -package org.apache.hadoop.ozone.om.ratis.metrics; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java index e4dd0ab5ada8..125c9efcaf2d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBuffer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.ozone.om.S3SecretManagerImpl; import org.apache.hadoop.ozone.om.S3SecretCache; import org.apache.hadoop.ozone.om.S3SecretLockedManager; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.ozone.om.request.s3.security.S3GetSecretRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.bucket.OMBucketCreateResponse; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 61be29eade6d..22272182997e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -37,7 +37,6 @@ .CreateBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; -import org.apache.hadoop.ozone.om.ratis.metrics.OzoneManagerDoubleBufferMetrics; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java index e6ad328ab98f..e1a3c97d2be2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/OzoneManagerSyncMetrics.java @@ -107,39 +107,39 @@ public void setAverageNumUpdatesInDeltaRequest(float avg) { averageNumUpdatesInDeltaRequest.set(avg); } - public MutableCounterLong getNumSnapshotRequests() { - return numSnapshotRequests; + public long getNumSnapshotRequests() { + return numSnapshotRequests.value(); } - public MutableCounterLong getNumSnapshotRequestsFailed() { - return numSnapshotRequestsFailed; + public long getNumSnapshotRequestsFailed() { + return numSnapshotRequestsFailed.value(); } - public MutableRate getSnapshotRequestLatency() { + MutableRate getSnapshotRequestLatency() { return snapshotRequestLatency; } - public MutableCounterLong getNumDeltaRequestsFailed() { - return numDeltaRequestsFailed; + public long 
getNumDeltaRequestsFailed() { + return numDeltaRequestsFailed.value(); } - public MutableCounterLong getNumUpdatesInDeltaTotal() { - return numUpdatesInDeltaTotal; + public long getNumUpdatesInDeltaTotal() { + return numUpdatesInDeltaTotal.value(); } - public MutableGaugeFloat getAverageNumUpdatesInDeltaRequest() { - return averageNumUpdatesInDeltaRequest; + public float getAverageNumUpdatesInDeltaRequest() { + return averageNumUpdatesInDeltaRequest.value(); } - public MutableCounterLong getNumNonZeroDeltaRequests() { - return numNonZeroDeltaRequests; + public long getNumNonZeroDeltaRequests() { + return numNonZeroDeltaRequests.value(); } public void setSequenceNumberLag(long lag) { sequenceNumberLag.set(lag); } - public MutableGaugeLong getSequenceNumberLag() { - return sequenceNumberLag; + public long getSequenceNumberLag() { + return sequenceNumberLag.value(); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index d15cd6142d3c..032bff80ade3 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -297,8 +297,8 @@ public void testGetAndApplyDeltaUpdatesFromOM( OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(4.0, - metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); - assertEquals(1, metrics.getNumNonZeroDeltaRequests().value()); + metrics.getAverageNumUpdatesInDeltaRequest(), 0.0); + assertEquals(1, metrics.getNumNonZeroDeltaRequests()); // In this method, we have to assert the "GET" path and the "APPLY" path. @@ -372,8 +372,8 @@ public void testGetAndApplyDeltaUpdatesFromOMWithLimit( OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(1.0, - metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); - assertEquals(3, metrics.getNumNonZeroDeltaRequests().value()); + metrics.getAverageNumUpdatesInDeltaRequest(), 0.0); + assertEquals(3, metrics.getNumNonZeroDeltaRequests()); // In this method, we have to assert the "GET" path and the "APPLY" path. @@ -417,7 +417,7 @@ public void testSyncDataFromOMFullSnapshot( reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol); OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); - assertEquals(0, metrics.getNumSnapshotRequests().value()); + assertEquals(0, metrics.getNumSnapshotRequests()); // Should trigger full snapshot request. 
ozoneManagerServiceProvider.syncDataFromOM(); @@ -429,7 +429,7 @@ public void testSyncDataFromOMFullSnapshot( assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); - assertEquals(1, metrics.getNumSnapshotRequests().value()); + assertEquals(1, metrics.getNumSnapshotRequests()); } @Test @@ -470,7 +470,7 @@ public void testSyncDataFromOMDeltaUpdates( verify(reconTaskControllerMock, times(1)) .consumeOMEvents(any(OMUpdateEventBatch.class), any(OMMetadataManager.class)); - assertEquals(0, metrics.getNumSnapshotRequests().value()); + assertEquals(0, metrics.getNumSnapshotRequests()); } @Test @@ -509,7 +509,7 @@ public void testSyncDataFromOMFullSnapshotForSNNFE( assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); - assertEquals(1, metrics.getNumSnapshotRequests().value()); + assertEquals(1, metrics.getNumSnapshotRequests()); } private ReconTaskController getMockTaskController() { From 2f0535394088f610f519a3bfcd486c4bdc8f84a3 Mon Sep 17 00:00:00 2001 From: XiChen <32928346+xichen01@users.noreply.github.com> Date: Wed, 6 Mar 2024 22:03:52 +0800 Subject: [PATCH 103/108] HDDS-10384. RPC client reusing thread resources. (#6326) --- .../scm/storage/AbstractCommitWatcher.java | 2 +- .../hdds/scm/storage/BlockOutputStream.java | 5 +--- .../hdds/scm/storage/CommitWatcher.java | 23 +++++++++++++++---- .../scm/storage/RatisBlockOutputStream.java | 9 +++----- .../ECReconstructionCoordinator.java | 3 +-- .../ozone/client/io/ECKeyOutputStream.java | 11 ++++----- .../hadoop/ozone/client/rpc/RpcClient.java | 3 +-- .../hdds/scm/storage/TestCommitWatcher.java | 4 ++-- .../client/rpc/TestOzoneAtRestEncryption.java | 12 +++++++--- .../rpc/TestOzoneRpcClientAbstract.java | 1 + 10 files changed, 42 insertions(+), 31 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java index 0c5501c7922c..957f761ccbc2 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractCommitWatcher.java @@ -73,7 +73,7 @@ SortedMap> getCommitIndexMap() { return commitIndexMap; } - void updateCommitInfoMap(long index, List buffers) { + synchronized void updateCommitInfoMap(long index, List buffers) { commitIndexMap.computeIfAbsent(index, k -> new LinkedList<>()) .addAll(buffers); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 5ff5da60989e..5c0516d7bd4f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -25,7 +25,6 @@ import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; @@ -182,8 +181,7 @@ public BlockOutputStream( (long) flushPeriod * streamBufferArgs.getStreamBufferSize() == streamBufferArgs 
.getStreamBufferFlushSize()); - // A single thread executor handle the responses of async requests - responseExecutor = Executors.newSingleThreadExecutor(); + this.responseExecutor = blockOutputStreamResourceProvider.get(); bufferList = null; totalDataFlushedLength = 0; writtenDataLength = 0; @@ -657,7 +655,6 @@ public void cleanup(boolean invalidateClient) { bufferList.clear(); } bufferList = null; - responseExecutor.shutdown(); } /** diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java index 3c7f8a2360c8..aa339409eceb 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java @@ -24,6 +24,7 @@ */ package org.apache.hadoop.hdds.scm.storage; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.ozone.common.ChunkBuffer; @@ -32,6 +33,7 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; /** * This class executes watchForCommit on ratis pipeline and releases @@ -42,8 +44,8 @@ class CommitWatcher extends AbstractCommitWatcher { private final BufferPool bufferPool; // future Map to hold up all putBlock futures - private final ConcurrentMap> futureMap = new ConcurrentHashMap<>(); + private final ConcurrentMap> + futureMap = new ConcurrentHashMap<>(); CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient) { super(xceiverClient); @@ -67,11 +69,24 @@ void releaseBuffers(long index) { + totalLength + ": existing = " + futureMap.keySet()); } - ConcurrentMap> getFutureMap() { + @VisibleForTesting + ConcurrentMap> getFutureMap() { return futureMap; } + public void putFlushFuture(long flushPos, CompletableFuture flushFuture) { + futureMap.compute(flushPos, + (key, previous) -> previous == null ? 
flushFuture : + previous.thenCombine(flushFuture, (prev, curr) -> curr)); + } + + + public void waitOnFlushFutures() throws InterruptedException, ExecutionException { + // wait for all the transactions to complete + CompletableFuture.allOf(futureMap.values().toArray( + new CompletableFuture[0])).get(); + } + @Override public void cleanup() { super.cleanup(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index 6a2758d36486..b587b1d13171 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -113,16 +113,13 @@ void updateCommitInfo(XceiverClientReply reply, List buffers) { } @Override - void putFlushFuture(long flushPos, - CompletableFuture flushFuture) { - commitWatcher.getFutureMap().put(flushPos, flushFuture); + void putFlushFuture(long flushPos, CompletableFuture flushFuture) { + commitWatcher.putFlushFuture(flushPos, flushFuture); } @Override void waitOnFlushFutures() throws InterruptedException, ExecutionException { - // wait for all the transactions to complete - CompletableFuture.allOf(commitWatcher.getFutureMap().values().toArray( - new CompletableFuture[0])).get(); + commitWatcher.waitOnFlushFutures(); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index a45c15844847..90756bbc8898 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -101,8 +101,7 @@ public class ECReconstructionCoordinator implements Closeable { private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; - // TODO: Adjusts to the appropriate value when the ec-reconstruct-writer thread pool is used. 
- private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 0; + private static final int EC_RECONSTRUCT_STRIPE_WRITE_POOL_MIN_SIZE = 5; private final ECContainerOperationClient containerOperationClient; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java index 878558073f75..0cb3973e0411 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ECKeyOutputStream.java @@ -43,8 +43,6 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -66,7 +64,6 @@ public final class ECKeyOutputStream extends KeyOutputStream private final int numParityBlks; private final ByteBufferPool bufferPool; private final RawErasureEncoder encoder; - private final ExecutorService flushExecutor; private final Future flushFuture; private final AtomicLong flushCheckpoint; @@ -119,12 +116,13 @@ private ECKeyOutputStream(Builder builder) { this.writeOffset = 0; this.encoder = CodecUtil.createRawEncoderWithFallback( builder.getReplicationConfig()); - this.flushExecutor = Executors.newSingleThreadExecutor(); S3Auth s3Auth = builder.getS3CredentialsProvider().get(); ThreadLocal s3CredentialsProvider = builder.getS3CredentialsProvider(); - flushExecutor.submit(() -> s3CredentialsProvider.set(s3Auth)); - this.flushFuture = this.flushExecutor.submit(this::flushStripeFromQueue); + this.flushFuture = builder.getExecutorServiceSupplier().get().submit(() -> { + s3CredentialsProvider.set(s3Auth); + return flushStripeFromQueue(); + }); this.flushCheckpoint = new AtomicLong(0); this.atomicKeyCreation = builder.getAtomicKeyCreation(); } @@ -495,7 +493,6 @@ public void close() throws IOException { } catch (InterruptedException e) { throw new IOException("Flushing thread was interrupted", e); } finally { - flushExecutor.shutdownNow(); closeCurrentStreamEntry(); blockOutputStreamEntryPool.cleanup(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index bcb08f1d9130..178a9919c114 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -196,8 +196,7 @@ public class RpcClient implements ClientProtocol { // for reconstruction. private static final int EC_RECONSTRUCT_STRIPE_READ_POOL_MIN_SIZE = 3; - // TODO: Adjusts to the appropriate value when the writeThreadPool is used. 
- private static final int WRITE_POOL_MIN_SIZE = 0; + private static final int WRITE_POOL_MIN_SIZE = 1; private final ConfigurationSource conf; private final OzoneManagerClientProtocol ozoneManagerClient; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java index 2b13daaca291..c3ea911f1935 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java @@ -209,7 +209,7 @@ public void testReleaseBuffers() throws Exception { return v; }); futures.add(future); - watcher.getFutureMap().put(length, future); + watcher.putFlushFuture(length, future); replies.add(reply); } @@ -282,7 +282,7 @@ public void testReleaseBuffersOnException() throws Exception { return v; }); futures.add(future); - watcher.getFutureMap().put(length, future); + watcher.putFlushFuture(length, future); replies.add(reply); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 29cf1bc5e117..44303ed2ff23 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -213,6 +213,14 @@ static void shutdown() throws IOException { } } + static void reInitClient() throws IOException { + ozClient = OzoneClientFactory.getRpcClient(conf); + store = ozClient.getObjectStore(); + TestOzoneRpcClient.setOzClient(ozClient); + TestOzoneRpcClient.setStore(store); + } + + @ParameterizedTest @EnumSource void testPutKeyWithEncryption(BucketLayout bucketLayout) throws Exception { @@ -770,9 +778,7 @@ void testGetKeyProvider() throws Exception { KeyProvider kp3 = ozClient.getObjectStore().getKeyProvider(); assertNotEquals(kp3, kpSpy); - // Restore ozClient and store - TestOzoneRpcClient.setOzClient(OzoneClientFactory.getRpcClient(conf)); - TestOzoneRpcClient.setStore(ozClient.getObjectStore()); + reInitClient(); } private static RepeatedOmKeyInfo getMatchedKeyInfo( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index b0d3609651fb..b8386869308b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -1650,6 +1650,7 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException, } latch.countDown(); } catch (IOException ex) { + LOG.error("Execution failed: ", ex); latch.countDown(); failCount.incrementAndGet(); } From 418528adea50541923f15b844c2afff03be13e47 Mon Sep 17 00:00:00 2001 From: tanvipenumudy <46785609+tanvipenumudy@users.noreply.github.com> Date: Wed, 6 Mar 2024 23:26:12 +0530 Subject: [PATCH 104/108] HDDS-10460. 
Refine audit logging for bucket property update operations (#6329) --- .../hadoop/ozone/om/helpers/OmBucketArgs.java | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 34e93c1674af..168e9e952881 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -187,6 +187,27 @@ public Map toAuditMap() { if (this.ownerName != null) { auditMap.put(OzoneConsts.OWNER, this.ownerName); } + if (this.quotaInBytesSet && quotaInBytes > 0 || + (this.quotaInBytes != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_BYTES, + String.valueOf(this.quotaInBytes)); + } + if (this.quotaInNamespaceSet && quotaInNamespace > 0 || + (this.quotaInNamespace != OzoneConsts.QUOTA_RESET)) { + auditMap.put(OzoneConsts.QUOTA_IN_NAMESPACE, + String.valueOf(this.quotaInNamespace)); + } + if (this.bekInfo != null) { + auditMap.put(OzoneConsts.BUCKET_ENCRYPTION_KEY, + this.bekInfo.getKeyName()); + } + if (this.defaultReplicationConfig != null) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, String.valueOf( + this.defaultReplicationConfig.getType())); + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + this.defaultReplicationConfig.getReplicationConfig() + .getReplication()); + } return auditMap; } From 309e45996ce549f03cb2a3c7f62c7c24f4c773e4 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 6 Mar 2024 20:13:47 +0100 Subject: [PATCH 105/108] HDDS-10472. Audit log should include EC replication config (#6338) --- .../hadoop/hdds/client/ECReplicationConfig.java | 8 ++++++++ .../hadoop/ozone/om/request/RequestAuditor.java | 16 ++++++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java index 25ea315af284..9469fee7e284 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ECReplicationConfig.java @@ -154,6 +154,14 @@ public String getReplication() { + chunkKB(); } + /** Similar to {@link #getReplication()}, but applies to proto structure, without any validation. 
*/ + public static String toString(HddsProtos.ECReplicationConfig proto) { + return proto.getCodec() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getData() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getParity() + EC_REPLICATION_PARAMS_DELIMITER + + proto.getEcChunkSize(); + } + public HddsProtos.ECReplicationConfig toProto() { return HddsProtos.ECReplicationConfig.newBuilder() .setData(data) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java index 93b7c92902b6..c0872db0fd61 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/RequestAuditor.java @@ -22,6 +22,8 @@ import java.util.LinkedHashMap; import java.util.Map; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditAction; import org.apache.hadoop.ozone.audit.AuditMessage; @@ -68,10 +70,16 @@ default Map buildKeyArgsAuditMap(KeyArgs keyArgs) { auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName()); auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(keyArgs.getDataSize())); - auditMap.put(OzoneConsts.REPLICATION_TYPE, - (keyArgs.getType() != null) ? keyArgs.getType().name() : null); - auditMap.put(OzoneConsts.REPLICATION_FACTOR, - (keyArgs.getFactor() != null) ? keyArgs.getFactor().name() : null); + if (keyArgs.hasType()) { + auditMap.put(OzoneConsts.REPLICATION_TYPE, keyArgs.getType().name()); + } + if (keyArgs.hasFactor() && keyArgs.getFactor() != HddsProtos.ReplicationFactor.ZERO) { + auditMap.put(OzoneConsts.REPLICATION_FACTOR, keyArgs.getFactor().name()); + } + if (keyArgs.hasEcReplicationConfig()) { + auditMap.put(OzoneConsts.REPLICATION_CONFIG, + ECReplicationConfig.toString(keyArgs.getEcReplicationConfig())); + } return auditMap; } } From 140c5deffb848efc762aacf0cad9aa617c9c5374 Mon Sep 17 00:00:00 2001 From: tanvipenumudy <46785609+tanvipenumudy@users.noreply.github.com> Date: Thu, 7 Mar 2024 04:15:08 +0530 Subject: [PATCH 106/108] HDDS-9343. 
Shift sortDatanodes logic to OM (#5391) --- .../hadoop/hdds/protocol/DatanodeDetails.java | 25 +++ .../apache/hadoop/hdds/scm/net/InnerNode.java | 14 ++ .../hadoop/hdds/scm/net/InnerNodeImpl.java | 159 ++++++++++++++- .../hdds/scm/net/NetworkTopologyImpl.java | 20 +- .../org/apache/hadoop/hdds/scm/net/Node.java | 20 ++ .../apache/hadoop/hdds/scm/net/NodeImpl.java | 15 ++ .../hdds/scm/net/NodeSchemaManager.java | 8 + .../apache/hadoop/ozone/OzoneConfigKeys.java | 5 + .../src/main/resources/ozone-default.xml | 8 + .../hdds/scm/client/ScmTopologyClient.java | 127 ++++++++++++ .../hadoop/hdds/scm/client/package-info.java | 24 +++ .../protocol/ScmBlockLocationProtocol.java | 8 + ...ocationProtocolClientSideTranslatorPB.java | 43 ++++ .../src/main/proto/hdds.proto | 24 +++ .../src/main/proto/ScmServerProtocol.proto | 11 +- .../hadoop/hdds/scm/node/DatanodeInfo.java | 1 - .../hadoop/hdds/scm/node/NodeStatus.java | 1 - ...ocationProtocolServerSideTranslatorPB.java | 15 ++ .../scm/server/SCMBlockProtocolServer.java | 6 + .../hadoop/ozone/TestDelegationToken.java | 4 + .../ozone/TestGetClusterTreeInformation.java | 87 ++++++++ .../hadoop/ozone/TestOMSortDatanodes.java | 187 ++++++++++++++++++ .../hadoop/ozone/TestSecureOzoneCluster.java | 11 ++ .../om/TestOmContainerLocationCache.java | 11 +- .../om/TestOzoneManagerListVolumesSecure.java | 3 + .../hadoop/ozone/om/KeyManagerImpl.java | 91 +++++++-- .../apache/hadoop/ozone/om/OzoneManager.java | 42 ++++ .../hadoop/ozone/om/OmTestManagers.java | 5 + .../om/ScmBlockLocationTestingClient.java | 11 ++ .../hadoop/ozone/om/TestKeyManagerUnit.java | 80 +------- 30 files changed, 959 insertions(+), 107 deletions(-) create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index 5b6fb6fe9b81..a30f8414dce7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -364,6 +364,9 @@ public static DatanodeDetails.Builder newBuilder( if (datanodeDetailsProto.hasNetworkLocation()) { builder.setNetworkLocation(datanodeDetailsProto.getNetworkLocation()); } + if (datanodeDetailsProto.hasLevel()) { + builder.setLevel(datanodeDetailsProto.getLevel()); + } if (datanodeDetailsProto.hasPersistedOpState()) { builder.setPersistedOpState(datanodeDetailsProto.getPersistedOpState()); } @@ -456,6 +459,9 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder( if (!Strings.isNullOrEmpty(getNetworkLocation())) { builder.setNetworkLocation(getNetworkLocation()); } + if (getLevel() > 0) { + builder.setLevel(getLevel()); + } if (persistedOpState != null) { builder.setPersistedOpState(persistedOpState); } @@ -585,6 +591,7 @@ public static final class Builder { private String hostName; private String networkName; private String networkLocation; + private int level; private List ports; private String certSerialId; private String version; @@ -616,6 +623,7 @@ public Builder 
setDatanodeDetails(DatanodeDetails details) { this.hostName = details.getHostName(); this.networkName = details.getNetworkName(); this.networkLocation = details.getNetworkLocation(); + this.level = details.getLevel(); this.ports = details.getPorts(); this.certSerialId = details.getCertSerialId(); this.version = details.getVersion(); @@ -683,6 +691,11 @@ public Builder setNetworkLocation(String loc) { return this; } + public Builder setLevel(int level) { + this.level = level; + return this; + } + /** * Adds a DataNode Port. * @@ -807,6 +820,9 @@ public DatanodeDetails build() { if (networkName != null) { dn.setNetworkName(networkName); } + if (level > 0) { + dn.setLevel(level); + } return dn; } } @@ -1011,4 +1027,13 @@ public String getBuildDate() { public void setBuildDate(String date) { this.buildDate = date; } + + @Override + public HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + HddsProtos.NetworkNode networkNode = + HddsProtos.NetworkNode.newBuilder() + .setDatanodeDetails(toProtoBuilder(clientVersion).build()).build(); + return networkNode; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java index c87d826d2529..6074e7da0afc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNode.java @@ -20,6 +20,8 @@ import java.util.Collection; import java.util.List; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + /** * The interface defines an inner node in a network topology. * An inner node represents network topology entities, such as data center, @@ -89,4 +91,16 @@ N newInnerNode(String name, String location, InnerNode parent, int level, */ Node getLeaf(int leafIndex, List excludedScopes, Collection excludedNodes, int ancestorGen); + + @Override + HddsProtos.NetworkNode toProtobuf(int clientVersion); + + boolean equals(Object o); + + int hashCode(); + + static InnerNode fromProtobuf( + HddsProtos.InnerNode innerNode) { + return InnerNodeImpl.fromProtobuf(innerNode); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index f2648f3d294c..332dddac25c9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -27,6 +27,7 @@ import java.util.Map; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -47,10 +48,10 @@ public InnerNodeImpl newInnerNode(String name, String location, } } - static final Factory FACTORY = new Factory(); + public static final Factory FACTORY = new Factory(); // a map of node's network name to Node for quick search and keep // the insert order - private final HashMap childrenMap = + private HashMap childrenMap = new LinkedHashMap(); // number of descendant leaves under this node private int numOfLeaves; @@ -66,6 +67,76 @@ protected InnerNodeImpl(String name, String location, InnerNode parent, super(name, location, parent, level, cost); } + /** + * Construct an InnerNode from its name, network location, level, cost, + * childrenMap and number of leaves. This constructor is used as part of + * protobuf deserialization. 
+ */ + protected InnerNodeImpl(String name, String location, int level, int cost, + HashMap childrenMap, int numOfLeaves) { + super(name, location, null, level, cost); + this.childrenMap = childrenMap; + this.numOfLeaves = numOfLeaves; + } + + /** + * InnerNodeImpl Builder to help construct an InnerNodeImpl object from + * protobuf objects. + */ + public static class Builder { + private String name; + private String location; + private int cost; + private int level; + private HashMap childrenMap = new LinkedHashMap<>(); + private int numOfLeaves; + + public Builder setName(String name) { + this.name = name; + return this; + } + + public Builder setLocation(String location) { + this.location = location; + return this; + } + + public Builder setCost(int cost) { + this.cost = cost; + return this; + } + + public Builder setLevel(int level) { + this.level = level; + return this; + } + + public Builder setChildrenMap( + List childrenMapList) { + HashMap newChildrenMap = new LinkedHashMap<>(); + for (HddsProtos.ChildrenMap childrenMapProto : + childrenMapList) { + String networkName = childrenMapProto.hasNetworkName() ? + childrenMapProto.getNetworkName() : null; + Node node = childrenMapProto.hasNetworkNode() ? + Node.fromProtobuf(childrenMapProto.getNetworkNode()) : null; + newChildrenMap.put(networkName, node); + } + this.childrenMap = newChildrenMap; + return this; + } + + public Builder setNumOfLeaves(int numOfLeaves) { + this.numOfLeaves = numOfLeaves; + return this; + } + + public InnerNodeImpl build() { + return new InnerNodeImpl(name, location, level, cost, childrenMap, + numOfLeaves); + } + } + /** @return the number of children this node has */ private int getNumOfChildren() { return childrenMap.size(); @@ -77,6 +148,11 @@ public int getNumOfLeaves() { return numOfLeaves; } + /** @return a map of node's network name to Node. */ + public HashMap getChildrenMap() { + return childrenMap; + } + /** * @return number of its all nodes at level level. Here level is a * relative level. If level is 1, means node itself. 
If level is 2, means its @@ -390,14 +466,83 @@ public Node getLeaf(int leafIndex, List excludedScopes, } @Override - public boolean equals(Object to) { - if (to == null) { - return false; + public HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + + HddsProtos.InnerNode.Builder innerNode = + HddsProtos.InnerNode.newBuilder() + .setNumOfLeaves(numOfLeaves) + .setNodeTopology( + NodeImpl.toProtobuf(getNetworkName(), getNetworkLocation(), + getLevel(), getCost())); + + if (childrenMap != null && !childrenMap.isEmpty()) { + for (Map.Entry entry : childrenMap.entrySet()) { + if (entry.getValue() != null) { + HddsProtos.ChildrenMap childrenMapProto = + HddsProtos.ChildrenMap.newBuilder() + .setNetworkName(entry.getKey()) + .setNetworkNode(entry.getValue().toProtobuf(clientVersion)) + .build(); + innerNode.addChildrenMap(childrenMapProto); + } + } + } + innerNode.build(); + + HddsProtos.NetworkNode networkNode = + HddsProtos.NetworkNode.newBuilder() + .setInnerNode(innerNode).build(); + + return networkNode; + } + + public static InnerNode fromProtobuf(HddsProtos.InnerNode innerNode) { + InnerNodeImpl.Builder builder = new InnerNodeImpl.Builder(); + + if (innerNode.hasNodeTopology()) { + HddsProtos.NodeTopology nodeTopology = innerNode.getNodeTopology(); + + if (nodeTopology.hasName()) { + builder.setName(nodeTopology.getName()); + } + if (nodeTopology.hasLocation()) { + builder.setLocation(nodeTopology.getLocation()); + } + if (nodeTopology.hasLevel()) { + builder.setLevel(nodeTopology.getLevel()); + } + if (nodeTopology.hasCost()) { + builder.setCost(nodeTopology.getCost()); + } + } + + if (!innerNode.getChildrenMapList().isEmpty()) { + builder.setChildrenMap(innerNode.getChildrenMapList()); + } + if (innerNode.hasNumOfLeaves()) { + builder.setNumOfLeaves(innerNode.getNumOfLeaves()); } - if (this == to) { + + return builder.build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { return true; } - return this.toString().equals(to.toString()); + if (o == null || getClass() != o.getClass()) { + return false; + } + InnerNodeImpl innerNode = (InnerNodeImpl) o; + return this.getNetworkName().equals(innerNode.getNetworkName()) && + this.getNetworkLocation().equals(innerNode.getNetworkLocation()) && + this.getLevel() == innerNode.getLevel() && + this.getCost() == innerNode.getCost() && + this.numOfLeaves == innerNode.numOfLeaves && + this.childrenMap.size() == innerNode.childrenMap.size() && + this.childrenMap.equals(innerNode.childrenMap); } @Override diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 2dc86c1b6856..f6f013259c59 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -75,6 +75,15 @@ public NetworkTopologyImpl(ConfigurationSource conf) { schemaManager.getCost(NetConstants.ROOT_LEVEL)); } + public NetworkTopologyImpl(String schemaFile, InnerNode clusterTree) { + schemaManager = NodeSchemaManager.getInstance(); + schemaManager.init(schemaFile); + maxLevel = schemaManager.getMaxLevel(); + shuffleOperation = Collections::shuffle; + factory = InnerNodeImpl.FACTORY; + this.clusterTree = clusterTree; + } + @VisibleForTesting public NetworkTopologyImpl(NodeSchemaManager manager, Consumer> shuffleOperation) { @@ -726,8 +735,13 @@ public int getDistanceCost(Node node1, Node 
node2) { int cost = 0; netlock.readLock().lock(); try { - if ((node1.getAncestor(level1 - 1) != clusterTree) || - (node2.getAncestor(level2 - 1) != clusterTree)) { + Node ancestor1 = node1.getAncestor(level1 - 1); + boolean node1Topology = (ancestor1 != null && clusterTree != null && + !ancestor1.equals(clusterTree)) || (ancestor1 != clusterTree); + Node ancestor2 = node2.getAncestor(level2 - 1); + boolean node2Topology = (ancestor2 != null && clusterTree != null && + !ancestor2.equals(clusterTree)) || (ancestor2 != clusterTree); + if (node1Topology || node2Topology) { LOG.debug("One of the nodes is outside of network topology"); return Integer.MAX_VALUE; } @@ -741,7 +755,7 @@ public int getDistanceCost(Node node1, Node node2) { level2--; cost += node2 == null ? 0 : node2.getCost(); } - while (node1 != null && node2 != null && node1 != node2) { + while (node1 != null && node2 != null && !node1.equals(node2)) { node1 = node1.getParent(); node2 = node2.getParent(); cost += node1 == null ? 0 : node1.getCost(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java index 9884888a1dd4..50f702cce08e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/Node.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hdds.scm.net; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + /** * The interface defines a node in a network topology. * A node may be a leave representing a data node or an inner @@ -126,4 +129,21 @@ public interface Node { * @return true if this node is under a specific scope */ boolean isDescendant(String nodePath); + + default HddsProtos.NetworkNode toProtobuf( + int clientVersion) { + return null; + } + + static Node fromProtobuf( + HddsProtos.NetworkNode networkNode) { + if (networkNode.hasDatanodeDetails()) { + return DatanodeDetails.getFromProtoBuf( + networkNode.getDatanodeDetails()); + } else if (networkNode.hasInnerNode()) { + return InnerNode.fromProtobuf(networkNode.getInnerNode()); + } else { + return null; + } + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java index e7a45f649b6e..e4d76cd3dbc7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.net; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; @@ -229,6 +230,20 @@ public boolean isDescendant(String nodePath) { NetUtils.addSuffix(nodePath)); } + public static HddsProtos.NodeTopology toProtobuf(String name, String location, + int level, int cost) { + + HddsProtos.NodeTopology.Builder nodeTopologyBuilder = + HddsProtos.NodeTopology.newBuilder() + .setName(name) + .setLocation(location) + .setLevel(level) + .setCost(cost); + + HddsProtos.NodeTopology nodeTopology = nodeTopologyBuilder.build(); + return nodeTopology; + } + @Override public boolean equals(Object to) { if (to == null) { diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index eecd79876720..fb37b214cad1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -62,6 +62,14 @@ public void init(ConfigurationSource conf) { String schemaFile = conf.get( ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT); + loadSchemaFile(schemaFile); + } + + public void init(String schemaFile) { + loadSchemaFile(schemaFile); + } + + private void loadSchemaFile(String schemaFile) { NodeSchemaLoadResult result; try { result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 0080686575ba..c7867ffdcbee 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -672,6 +672,11 @@ public final class OzoneConfigKeys { public static final String HDDS_SCM_CLIENT_FAILOVER_MAX_RETRY = "hdds.scmclient.failover.max.retry"; + public static final String + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION = + "ozone.om.network.topology.refresh.duration"; + public static final String + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT = "1h"; /** * There is no need to instantiate this class. diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index ee0aa4514a78..fc873f20af69 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3791,6 +3791,14 @@ Wait duration before which close container is send to DN. + + ozone.om.network.topology.refresh.duration + 1h + SCM, OZONE, OM + The duration at which we periodically fetch the updated network + topology cluster tree from SCM. + + ozone.scm.ha.ratis.server.snapshot.creation.gap 1024 diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java new file mode 100644 index 000000000000..2e42df957346 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/ScmTopologyClient.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.client; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT; + +/** + * This client implements a background thread which periodically checks and + * gets the latest network topology cluster tree from SCM. + */ +public class ScmTopologyClient { + private static final Logger LOG = + LoggerFactory.getLogger(ScmTopologyClient.class); + + private final ScmBlockLocationProtocol scmBlockLocationProtocol; + private final AtomicReference cache = new AtomicReference<>(); + private ScheduledExecutorService executorService; + + public ScmTopologyClient( + ScmBlockLocationProtocol scmBlockLocationProtocol) { + this.scmBlockLocationProtocol = scmBlockLocationProtocol; + } + + public InnerNode getClusterTree() { + return requireNonNull(cache.get(), + "ScmBlockLocationClient must have been initialized already."); + } + + public void start(ConfigurationSource conf) throws IOException { + final InnerNode initialTopology = + scmBlockLocationProtocol.getNetworkTopology(); + LOG.info("Initial network topology fetched from SCM: {}.", + initialTopology); + cache.set(initialTopology); + scheduleNetworkTopologyPoller(conf, Instant.now()); + } + + public void stop() { + if (executorService != null) { + executorService.shutdown(); + try { + if (executorService.awaitTermination(5, TimeUnit.SECONDS)) { + executorService.shutdownNow(); + } + } catch (InterruptedException e) { + LOG.error("Interrupted while shutting down executor service.", e); + Thread.currentThread().interrupt(); + } + } + } + + private void scheduleNetworkTopologyPoller(ConfigurationSource conf, + Instant initialInvocation) { + Duration refreshDuration = parseRefreshDuration(conf); + Instant nextRefresh = initialInvocation.plus(refreshDuration); + ThreadFactory threadFactory = new ThreadFactoryBuilder() + .setNameFormat("NetworkTopologyPoller") + .setDaemon(true) + .build(); + executorService = Executors.newScheduledThreadPool(1, threadFactory); + Duration initialDelay = Duration.between(Instant.now(), nextRefresh); + + LOG.debug("Scheduling NetworkTopologyPoller with an initial delay of {}.", + initialDelay); + executorService.scheduleAtFixedRate(() -> checkAndRefresh(), + initialDelay.toMillis(), refreshDuration.toMillis(), + TimeUnit.MILLISECONDS); + } + + public static Duration parseRefreshDuration(ConfigurationSource conf) { + long 
refreshDurationInMs = conf.getTimeDuration( + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION, + OZONE_OM_NETWORK_TOPOLOGY_REFRESH_DURATION_DEFAULT, + TimeUnit.MILLISECONDS); + return Duration.ofMillis(refreshDurationInMs); + } + + private synchronized void checkAndRefresh() { + InnerNode current = cache.get(); + try { + InnerNode newTopology = scmBlockLocationProtocol.getNetworkTopology(); + if (!newTopology.equals(current)) { + cache.set(newTopology); + LOG.info("Updated network topology cluster tree fetched from " + + "SCM: {}.", newTopology); + } + } catch (IOException e) { + throw new UncheckedIOException( + "Error fetching updated network topology cluster tree from SCM", e); + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java new file mode 100644 index 000000000000..8dc9cb3cca2f --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/client/package-info.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *

+ * Freon related helper classes used for load testing. + */ + +/** + * Contains SCM client related classes. + */ +package org.apache.hadoop.hdds.scm.client; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java index ef2585488faa..8c84af859b4a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; @@ -138,4 +139,11 @@ List allocateBlock(long size, int numBlocks, */ List sortDatanodes(List nodes, String clientMachine) throws IOException; + + /** + * Retrieves the hierarchical cluster tree representing the network topology. + * @return the root node of the network topology cluster tree. + * @throws IOException + */ + InnerNode getNetworkTopology() throws IOException; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index 2e724969998b..1f114304ccaa 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -39,6 +40,8 @@ import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.AllocateScmBlockResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.KeyBlocks; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .SortDatanodesRequestProto; @@ -49,6 +52,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.Node; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; @@ -328,6 +334,43 @@ public List sortDatanodes(List 
nodes, return results; } + @Override + public InnerNode getNetworkTopology() throws IOException { + GetClusterTreeRequestProto request = + GetClusterTreeRequestProto.newBuilder().build(); + SCMBlockLocationRequest wrapper = createSCMBlockRequest(Type.GetClusterTree) + .setGetClusterTreeRequest(request) + .build(); + + final SCMBlockLocationResponse wrappedResponse = + handleError(submitRequest(wrapper)); + GetClusterTreeResponseProto resp = + wrappedResponse.getGetClusterTreeResponse(); + + return (InnerNode) setParent( + InnerNodeImpl.fromProtobuf(resp.getClusterTree())); + } + + /** + * Sets the parent field for the clusterTree nodes recursively. + * + * @param node cluster tree without parents set. + * @return updated cluster tree with parents set. + */ + private Node setParent(Node node) { + if (node instanceof InnerNodeImpl) { + InnerNodeImpl innerNode = (InnerNodeImpl) node; + if (innerNode.getChildrenMap() != null) { + for (Map.Entry child : innerNode.getChildrenMap() + .entrySet()) { + child.getValue().setParent(innerNode); + setParent(child.getValue()); + } + } + } + return node; + } + @Override public Object getUnderlyingProxyObject() { return rpcProxy; diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 3f346300b3ed..405845312357 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -47,6 +47,7 @@ message DatanodeDetailsProto { optional int64 persistedOpStateExpiry = 9; // The seconds after the epoch when the OpState should expire // TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode. 
+ optional uint32 level = 101; } /** @@ -497,3 +498,26 @@ message CompactionLogEntryProto { repeated CompactionFileInfoProto outputFileIntoList = 4; optional string compactionReason = 5; } + +message NodeTopology { + optional string name = 1; + optional string location = 2; + optional uint32 cost = 3; + optional uint32 level = 4; +} + +message NetworkNode { + optional DatanodeDetailsProto datanodeDetails = 1; + optional InnerNode innerNode = 3; +} + +message ChildrenMap { + optional string networkName = 1; + optional NetworkNode networkNode = 2; +} + +message InnerNode { + optional NodeTopology nodeTopology = 1; + optional uint32 numOfLeaves = 2; + repeated ChildrenMap childrenMap = 3; +} diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto index 307c23a56202..3d281975f2b4 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto @@ -30,7 +30,6 @@ package hadoop.hdds.block; import "hdds.proto"; - // SCM Block protocol enum Type { @@ -39,6 +38,7 @@ enum Type { GetScmInfo = 13; SortDatanodes = 14; AddScm = 15; + GetClusterTree = 16; } message SCMBlockLocationRequest { @@ -56,6 +56,7 @@ message SCMBlockLocationRequest { optional hadoop.hdds.GetScmInfoRequestProto getScmInfoRequest = 13; optional SortDatanodesRequestProto sortDatanodesRequest = 14; optional hadoop.hdds.AddScmRequestProto addScmRequestProto = 15; + optional GetClusterTreeRequestProto getClusterTreeRequest = 16; } message SCMBlockLocationResponse { @@ -80,6 +81,7 @@ message SCMBlockLocationResponse { optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; optional SortDatanodesResponseProto sortDatanodesResponse = 14; optional hadoop.hdds.AddScmResponseProto addScmResponse = 15; + optional GetClusterTreeResponseProto getClusterTreeResponse = 16; } /** @@ -230,6 +232,13 @@ message SortDatanodesResponseProto{ repeated DatanodeDetailsProto node = 1; } +message GetClusterTreeRequestProto { +} + +message GetClusterTreeResponseProto { + required InnerNode clusterTree = 1; +} + /** * Protocol used from OzoneManager to StorageContainerManager. * See request and response messages for details of the RPC calls. 
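For readers following the new wire format above, a minimal round trip through these messages might look like the sketch below. This is illustrative only and not part of the patch: the class name is hypothetical, the factory call mirrors the one used in the new tests, and the integer passed as clientVersion is just a compatibility hint consumed by toProtobuf().

// Hypothetical sketch: serialize a cluster tree root through the new
// HddsProtos messages and decode it again, as the translators do.
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.net.InnerNode;
import org.apache.hadoop.hdds.scm.net.InnerNodeImpl;
import org.apache.hadoop.hdds.scm.net.NetConstants;

public final class ClusterTreeRoundTripSketch {
  public static void main(String[] args) {
    // Root of an otherwise empty topology, built the same way as in the tests.
    InnerNode root = InnerNodeImpl.FACTORY.newInnerNode(
        "", "", null, NetConstants.ROOT_LEVEL, 1);

    // Server side: InnerNode -> NetworkNode proto (toProtobuf wraps the
    // InnerNode message in a NetworkNode).
    HddsProtos.NetworkNode wire = root.toProtobuf(1);

    // Client side: proto -> InnerNode; the client-side translator then
    // re-links parent pointers, which are not carried on the wire.
    InnerNode decoded = InnerNodeImpl.fromProtobuf(wire.getInnerNode());

    // The rewritten equals() compares name, location, level, cost, leaf count
    // and children, which is what the new integration test relies on.
    System.out.println(root.equals(decoded));
  }
}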
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java index 7893e90812dc..ab296fc52bf8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeInfo.java @@ -354,5 +354,4 @@ public int hashCode() { public boolean equals(Object obj) { return super.equals(obj); } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java index 7b1d6dd27d3a..3aff2f456e4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java @@ -261,5 +261,4 @@ public int compareTo(NodeStatus o) { } return order; } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index 0914cdd90b22..e77e2aebb31f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteKeyBlocksResultProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.GetClusterTreeResponseProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SortDatanodesRequestProto; @@ -43,6 +44,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.RatisUtil; +import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -159,6 +161,10 @@ private SCMBlockLocationResponse processMessage( request.getSortDatanodesRequest(), request.getVersion() )); break; + case GetClusterTree: + response.setGetClusterTreeResponse( + getClusterTree(request.getVersion())); + break; default: // Should never happen throw new IOException("Unknown Operation " + request.getCmdType() + @@ -276,4 +282,13 @@ public SortDatanodesResponseProto sortDatanodes( throw new ServiceException(ex); } } + + public GetClusterTreeResponseProto getClusterTree(int clientVersion) + throws IOException { + GetClusterTreeResponseProto.Builder resp = + GetClusterTreeResponseProto.newBuilder(); + InnerNode clusterTree = impl.getNetworkTopology(); + 
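// The SCM-side cluster tree is wrapped in a NetworkNode message by
// toProtobuf(); only its InnerNode part is copied into the GetClusterTree
// response below.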
resp.setClusterTree(clusterTree.toProtobuf(clientVersion).getInnerNode()); + return resp.build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 0747f04584bd..79002e27a2e6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -74,6 +74,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_HANDLER_COUNT_KEY; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.IO_EXCEPTION; import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.startRpcServer; import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress; @@ -417,6 +418,11 @@ private Node getOtherNode(String clientMachine) { return null; } + @Override + public InnerNode getNetworkTopology() { + return (InnerNode) scm.getClusterMap().getNode(ROOT); + } + @Override public AuditMessage buildAuditMessageForSuccess( AuditAction op, Map auditMap) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index a82a1a8be70a..77970ad4470b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -31,6 +31,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -46,6 +47,7 @@ import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; @@ -312,6 +314,8 @@ public void testDelegationToken(boolean useIp) throws Exception { try { // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient( + new ScmBlockLocationTestingClient(null, null, 0))); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String username = ugi.getUserName(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java new file mode 100644 index 000000000000..463c8b5ae5d9 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; +import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; + +import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +import java.io.IOException; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * + * This class is to test the serialization/deserialization of cluster tree + * information from SCM. + */ +@Timeout(300) +public class TestGetClusterTreeInformation { + + public static final Logger LOG = + LoggerFactory.getLogger(TestGetClusterTreeInformation.class); + private static int numOfDatanodes = 3; + private static MiniOzoneCluster cluster; + private static OzoneConfiguration conf; + private static StorageContainerManager scm; + + @BeforeAll + public static void init() throws IOException, TimeoutException, + InterruptedException { + conf = new OzoneConfiguration(); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(numOfDatanodes) + .setNumOfOzoneManagers(3) + .setNumOfStorageContainerManagers(3) + .build(); + cluster.waitForClusterToBeReady(); + scm = cluster.getStorageContainerManager(); + } + + @AfterAll + public static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testGetClusterTreeInformation() throws IOException { + SCMBlockLocationFailoverProxyProvider failoverProxyProvider = + new SCMBlockLocationFailoverProxyProvider(conf); + failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId()); + ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = + new ScmBlockLocationProtocolClientSideTranslatorPB( + failoverProxyProvider); + + InnerNode expectedInnerNode = (InnerNode) scm.getClusterMap().getNode(ROOT); + InnerNode actualInnerNode = scmBlockLocationClient.getNetworkTopology(); + assertEquals(expectedInnerNode, actualInnerNode); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java new file mode 100644 index 000000000000..cef872597e43 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOMSortDatanodes.java @@ -0,0 +1,187 @@ +/** + * Licensed 
to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import com.google.common.collect.ImmutableMap; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerStub; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.server.SCMConfigurator; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.net.StaticMapping; + +import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; + +import java.io.File; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_LEVEL; +import static org.mockito.Mockito.mock; + +/** + * {@link org.apache.hadoop.hdds.scm.server.TestSCMBlockProtocolServer} + * sortDatanodes tests for + * {@link org.apache.hadoop.ozone.om.KeyManagerImpl#sortDatanodes(List, String)}. 
+ */ +@Timeout(300) +public class TestOMSortDatanodes { + + private static OzoneConfiguration config; + private static StorageContainerManager scm; + private static NodeManager nodeManager; + private static KeyManagerImpl keyManager; + private static StorageContainerLocationProtocol mockScmContainerClient; + private static OzoneManager om; + private static File dir; + private static final int NODE_COUNT = 10; + private static final Map EDGE_NODES = ImmutableMap.of( + "edge0", "/rack0", + "edge1", "/rack1" + ); + + @BeforeAll + public static void setup() throws Exception { + config = new OzoneConfiguration(); + dir = GenericTestUtils.getRandomizedTestDir(); + config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); + config.set(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + StaticMapping.class.getName()); + config.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); + List datanodes = new ArrayList<>(NODE_COUNT); + List nodeMapping = new ArrayList<>(NODE_COUNT); + for (int i = 0; i < NODE_COUNT; i++) { + DatanodeDetails dn = randomDatanodeDetails(); + final String rack = "/rack" + (i % 2); + nodeMapping.add(dn.getHostName() + "=" + rack); + nodeMapping.add(dn.getIpAddress() + "=" + rack); + datanodes.add(dn); + } + EDGE_NODES.forEach((n, r) -> nodeMapping.add(n + "=" + r)); + config.set(StaticMapping.KEY_HADOOP_CONFIGURED_NODE_MAPPING, + String.join(",", nodeMapping)); + + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(SCMHAManagerStub.getInstance(true)); + configurator.setScmContext(SCMContext.emptyContext()); + scm = HddsTestUtils.getScm(config, configurator); + scm.start(); + scm.exitSafeMode(); + nodeManager = scm.getScmNodeManager(); + datanodes.forEach(dn -> nodeManager.register(dn, null, null)); + mockScmContainerClient = + mock(StorageContainerLocationProtocol.class); + OmTestManagers omTestManagers + = new OmTestManagers(config, scm.getBlockProtocolServer(), + mockScmContainerClient); + om = omTestManagers.getOzoneManager(); + keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); + } + + @AfterAll + public static void cleanup() throws Exception { + if (scm != null) { + scm.stop(); + scm.join(); + } + if (om != null) { + om.stop(); + } + FileUtils.deleteDirectory(dir); + } + + @Test + public void sortDatanodesRelativeToDatanode() { + for (DatanodeDetails dn : nodeManager.getAllNodes()) { + assertEquals(ROOT_LEVEL + 2, dn.getLevel()); + List sorted = + keyManager.sortDatanodes(nodeManager.getAllNodes(), nodeAddress(dn)); + assertEquals(dn, sorted.get(0), + "Source node should be sorted very first"); + assertRackOrder(dn.getNetworkLocation(), sorted); + } + } + + @Test + public void sortDatanodesRelativeToNonDatanode() { + for (Map.Entry entry : EDGE_NODES.entrySet()) { + assertRackOrder(entry.getValue(), + keyManager.sortDatanodes(nodeManager.getAllNodes(), entry.getKey())); + } + } + + @Test + public void testSortDatanodes() { + List nodes = nodeManager.getAllNodes(); + + // sort normal datanodes + String client; + client = nodeManager.getAllNodes().get(0).getIpAddress(); + List datanodeDetails = + keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + + // illegal client 1 + client += "X"; + datanodeDetails = keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + + // illegal client 2 + client = "/default-rack"; + datanodeDetails = keyManager.sortDatanodes(nodes, client); + assertEquals(NODE_COUNT, datanodeDetails.size()); + } + + private 
static void assertRackOrder(String rack, List list) { + int size = list.size(); + for (int i = 0; i < size / 2; i++) { + assertEquals(rack, list.get(i).getNetworkLocation(), + "Nodes in the same rack should be sorted first"); + } + for (int i = size / 2; i < size; i++) { + assertNotEquals(rack, list.get(i).getNetworkLocation(), + "Nodes in the other rack should be sorted last"); + } + } + + private String nodeAddress(DatanodeDetails dn) { + boolean useHostname = config.getBoolean( + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + return useHostname ? dn.getHostName() : dn.getIpAddress(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index bc49d176da9a..d183ed8229cc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -50,10 +50,12 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; @@ -87,6 +89,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.ScmBlockLocationTestingClient; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; @@ -208,6 +211,7 @@ final class TestSecureOzoneCluster { private File testUserKeytab; private String testUserPrincipal; private StorageContainerManager scm; + private ScmBlockLocationProtocol scmBlockClient; private OzoneManager om; private HddsProtos.OzoneManagerDetailsProto omInfo; private String host; @@ -264,6 +268,7 @@ void init() { clusterId = UUID.randomUUID().toString(); scmId = UUID.randomUUID().toString(); omId = UUID.randomUUID().toString(); + scmBlockClient = new ScmBlockLocationTestingClient(null, null, 0); startMiniKdc(); setSecureConfig(); @@ -609,6 +614,7 @@ void testAccessControlExceptionOnClient() throws Exception { setupOm(conf); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); } catch (Exception ex) { // Expects timeout failure from scmClient in om but om user login via @@ -676,6 +682,7 @@ void testDelegationTokenRenewal() throws Exception { setupOm(conf); OzoneManager.setTestSecureOmFlag(true); om.setCertClient(new CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); @@ -763,6 +770,7 @@ void testGetSetRevokeS3Secret() throws Exception { setupOm(conf); // Start OM om.setCertClient(new 
CertificateClientTestImpl(conf)); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); String username = ugi.getUserName(); @@ -999,6 +1007,7 @@ void testCertificateRotation() throws Exception { // create Ozone Manager instance, it will start the monitor task conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.setCertClient(client); // check after renew, client will have the new cert ID @@ -1164,6 +1173,7 @@ void testCertificateRotationUnRecoverableFailure() throws Exception { // create Ozone Manager instance, it will start the monitor task conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.setCertClient(mockClient); // check error message during renew @@ -1202,6 +1212,7 @@ void testDelegationTokenRenewCrossCertificateRenew() throws Exception { String omCertId1 = omCert.getSerialNumber().toString(); // Start OM om.setCertClient(certClient); + om.setScmTopologyClient(new ScmTopologyClient(scmBlockClient)); om.start(); GenericTestUtils.waitFor(() -> om.isLeaderReady(), 100, 10000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java index 50ff9c36a0a3..2ae69dc3c96f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmContainerLocationCache.java @@ -50,6 +50,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; @@ -162,6 +165,9 @@ public static void setUp() throws Exception { mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); mockScmContainerClient = mock(StorageContainerLocationProtocol.class); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(mockScmBlockLocationProtocol.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); OmTestManagers omTestManagers = new OmTestManagers(conf, mockScmBlockLocationProtocol, mockScmContainerClient); @@ -247,10 +253,13 @@ private static void createVolume(String volumeName) throws IOException { } @BeforeEach - public void beforeEach() { + public void beforeEach() throws IOException { CONTAINER_ID.getAndIncrement(); reset(mockScmBlockLocationProtocol, mockScmContainerClient, mockDn1Protocol, mockDn2Protocol); + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + when(mockScmBlockLocationProtocol.getNetworkTopology()).thenReturn( + factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1)); when(mockDn1Protocol.getPipeline()).thenReturn(createPipeline(DN1)); when(mockDn2Protocol.getPipeline()).thenReturn(createPipeline(DN2)); } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java index 41f1c14f3727..72f1c3374b28 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java @@ -46,6 +46,7 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.ozone.OzoneAcl; @@ -197,6 +198,8 @@ private void setupEnvironment(boolean aclEnabled, OzoneManager.setTestSecureOmFlag(true); om = OzoneManager.createOm(conf); + om.setScmTopologyClient(new ScmTopologyClient( + new ScmBlockLocationTestingClient(null, null, 0))); om.setCertClient(new CertificateClientTestImpl(conf)); om.start(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index ffe1908c6852..d2ca26e3fc08 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -45,11 +45,15 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.net.NodeImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.utils.BackgroundService; @@ -58,6 +62,9 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.net.CachedDNSToSwitchMapping; +import org.apache.hadoop.net.DNSToSwitchMapping; +import org.apache.hadoop.net.TableMapping; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -96,6 +103,7 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.RequestContext; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Time; import com.google.common.annotations.VisibleForTesting; @@ -108,6 +116,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; +import static org.apache.hadoop.hdds.scm.net.NetConstants.NODE_COST_DEFAULT; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; @@ -190,6 +199,7 @@ public class KeyManagerImpl implements KeyManager { private BackgroundService openKeyCleanupService; private BackgroundService multipartUploadCleanupService; private SnapshotDirectoryCleaningService snapshotDirectoryCleaningService; + private DNSToSwitchMapping dnsToSwitchMapping; public KeyManagerImpl(OzoneManager om, ScmClient scmClient, OzoneConfiguration conf, OMPerformanceMetrics metrics) { @@ -339,6 +349,16 @@ public void start(OzoneConfiguration configuration) { ozoneManager, configuration); multipartUploadCleanupService.start(); } + + Class dnsToSwitchMappingClass = + configuration.getClass( + DFSConfigKeysLegacy.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, + TableMapping.class, DNSToSwitchMapping.class); + DNSToSwitchMapping newInstance = ReflectionUtils.newInstance( + dnsToSwitchMappingClass, configuration); + dnsToSwitchMapping = + ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance + : new CachedDNSToSwitchMapping(newInstance)); } KeyProviderCryptoExtension getKMSProvider() { @@ -1844,8 +1864,7 @@ private FileEncryptionInfo getFileEncryptionInfo(OmBucketInfo bucketInfo) return encInfo; } - @VisibleForTesting - void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { + private void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { if (keyInfos != null && clientMachine != null) { Map, List> sortedPipelines = new HashMap<>(); for (OmKeyInfo keyInfo : keyInfos) { @@ -1865,8 +1884,7 @@ void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { LOG.warn("No datanodes in pipeline {}", pipeline.getId()); continue; } - sortedNodes = sortDatanodes(clientMachine, nodes, keyInfo, - uuidList); + sortedNodes = sortDatanodes(nodes, clientMachine); if (sortedNodes != null) { sortedPipelines.put(uuidSet, sortedNodes); } @@ -1882,24 +1900,59 @@ void sortDatanodes(String clientMachine, OmKeyInfo... keyInfos) { } } - private List sortDatanodes(String clientMachine, - List nodes, OmKeyInfo keyInfo, List nodeList) { - List sortedNodes = null; + @VisibleForTesting + public List sortDatanodes(List nodes, + String clientMachine) { + final Node client = getClientNode(clientMachine, nodes); + return ozoneManager.getClusterMap() + .sortByDistanceCost(client, nodes, nodes.size()); + } + + private Node getClientNode(String clientMachine, + List nodes) { + List matchingNodes = new ArrayList<>(); + boolean useHostname = ozoneManager.getConfiguration().getBoolean( + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME, + DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT); + for (DatanodeDetails node : nodes) { + if ((useHostname ? node.getHostName() : node.getIpAddress()).equals( + clientMachine)) { + matchingNodes.add(node); + } + } + return !matchingNodes.isEmpty() ? 
matchingNodes.get(0) : + getOtherNode(clientMachine); + } + + private Node getOtherNode(String clientMachine) { try { - sortedNodes = scmClient.getBlockClient() - .sortDatanodes(nodeList, clientMachine); - if (LOG.isDebugEnabled()) { - LOG.debug("Sorted datanodes {} for client {}, result: {}", nodes, - clientMachine, sortedNodes); + String clientLocation = resolveNodeLocation(clientMachine); + if (clientLocation != null) { + Node rack = ozoneManager.getClusterMap().getNode(clientLocation); + if (rack instanceof InnerNode) { + return new NodeImpl(clientMachine, clientLocation, + (InnerNode) rack, rack.getLevel() + 1, + NODE_COST_DEFAULT); + } } - } catch (IOException e) { - LOG.warn("Unable to sort datanodes based on distance to client, " - + " volume={}, bucket={}, key={}, client={}, datanodes={}, " - + " exception={}", - keyInfo.getVolumeName(), keyInfo.getBucketName(), - keyInfo.getKeyName(), clientMachine, nodeList, e.getMessage()); + } catch (Exception e) { + LOG.info("Could not resolve client {}: {}", + clientMachine, e.getMessage()); + } + return null; + } + + private String resolveNodeLocation(String hostname) { + List hosts = Collections.singletonList(hostname); + List resolvedHosts = dnsToSwitchMapping.resolve(hosts); + if (resolvedHosts != null && !resolvedHosts.isEmpty()) { + String location = resolvedHosts.get(0); + LOG.debug("Node {} resolved to location {}", hostname, location); + return location; + } else { + LOG.debug("Node resolution did not yield any result for {}", hostname); + return null; } - return sortedNodes; } private static List toNodeUuid(Collection nodes) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index b8133e5844f9..c27ba7836bf5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -82,7 +82,11 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.server.OzoneAdmins; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; @@ -354,6 +358,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneBlockTokenSecretManager blockTokenMgr; private CertificateClient certClient; private SecretKeySignerClient secretKeyClient; + private ScmTopologyClient scmTopologyClient; private final Text omRpcAddressTxt; private OzoneConfiguration configuration; private RPC.Server omRpcServer; @@ -386,6 +391,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneManagerHttpServer httpServer; private final OMStorage omStorage; private ObjectName omInfoBeanName; + private NetworkTopology clusterMap; private Timer metricsTimer; private ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask; private static final ObjectWriter WRITER = @@ -603,6 +609,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) final StorageContainerLocationProtocol scmContainerClient = 
getScmContainerClient(configuration); // verifies that the SCM info in the OM Version file is correct. final ScmBlockLocationProtocol scmBlockClient = getScmBlockClient(configuration); + scmTopologyClient = new ScmTopologyClient(scmBlockClient); this.scmClient = new ScmClient(scmBlockClient, scmContainerClient, configuration); this.ozoneLockProvider = new OzoneLockProvider(getKeyPathLockEnabled(), @@ -1135,6 +1142,24 @@ public void setCertClient(CertificateClient newClient) throws IOException { serviceInfo = new ServiceInfoProvider(secConfig, this, certClient); } + /** + * For testing purpose only. This allows setting up ScmBlockLocationClient + * without having to fully setup a working cluster. + */ + @VisibleForTesting + public void setScmTopologyClient( + ScmTopologyClient scmTopologyClient) { + this.scmTopologyClient = scmTopologyClient; + } + + public NetworkTopology getClusterMap() { + InnerNode currentTree = scmTopologyClient.getClusterTree(); + return new NetworkTopologyImpl(configuration.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + currentTree); + } + /** * For testing purpose only. This allows testing token in integration test * without fully setting up a working secure cluster. @@ -1673,6 +1698,18 @@ public void start() throws IOException { keyManager.start(configuration); + try { + scmTopologyClient.start(configuration); + } catch (IOException ex) { + LOG.error("Unable to initialize network topology schema file. ", ex); + throw new UncheckedIOException(ex); + } + + clusterMap = new NetworkTopologyImpl(configuration.get( + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, + ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT), + scmTopologyClient.getClusterTree()); + try { httpServer = new OzoneManagerHttpServer(configuration, this); httpServer.start(); @@ -2232,6 +2269,11 @@ public boolean stop() { } keyManager.stop(); stopSecretManager(); + + if (scmTopologyClient != null) { + scmTopologyClient.stop(); + } + if (httpServer != null) { httpServer.stop(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java index 43d29c1608a8..edffd5ed74eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -105,12 +106,16 @@ public OmTestManagers(OzoneConfiguration conf, keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils .getInternalState(om, "keyManager"); ScmClient scmClient = new ScmClient(scmBlockClient, containerClient, conf); + ScmTopologyClient scmTopologyClient = + new ScmTopologyClient(scmBlockClient); HddsWhiteboxTestUtils.setInternalState(om, "scmClient", scmClient); HddsWhiteboxTestUtils.setInternalState(keyManager, "scmClient", scmClient); HddsWhiteboxTestUtils.setInternalState(keyManager, "secretManager", mock(OzoneBlockTokenSecretManager.class)); + 
HddsWhiteboxTestUtils.setInternalState(om, + "scmTopologyClient", scmTopologyClient); om.start(); waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java index 8847a2d51e3f..8ba5ca779c1e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ScmBlockLocationTestingClient.java @@ -31,6 +31,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; @@ -200,6 +203,14 @@ public List sortDatanodes(List nodes, return null; } + @Override + public InnerNode getNetworkTopology() { + InnerNode.Factory factory = InnerNodeImpl.FACTORY; + InnerNode clusterTree = factory.newInnerNode("", "", null, + NetConstants.ROOT_LEVEL, 1); + return clusterTree; + } + /** * Return the number of blocks puesdo deleted by this testing client. */ diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 278d96023c81..5e2e27e0c1f4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -23,12 +23,10 @@ import java.nio.file.Path; import java.time.Instant; import java.util.ArrayList; -import java.util.HashMap; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; @@ -44,6 +42,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.net.InnerNode; +import org.apache.hadoop.hdds.scm.net.InnerNodeImpl; +import org.apache.hadoop.hdds.scm.net.NetConstants; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -79,14 +80,9 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.api.TestInstance; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import static com.google.common.collect.Sets.newHashSet; -import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; -import static java.util.Comparator.comparing; -import static java.util.stream.Collectors.toList; import static org.assertj.core.api.Assertions.assertThat; import static 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
index 278d96023c81..5e2e27e0c1f4 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -23,12 +23,10 @@
 import java.nio.file.Path;
 import java.time.Instant;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
@@ -44,6 +42,9 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.net.InnerNode;
+import org.apache.hadoop.hdds.scm.net.InnerNodeImpl;
+import org.apache.hadoop.hdds.scm.net.NetConstants;
 import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
@@ -79,14 +80,9 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 import org.junit.jupiter.api.TestInstance;
-import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
 
 import static com.google.common.collect.Sets.newHashSet;
-import static java.util.Collections.emptyList;
 import static java.util.Collections.singletonList;
-import static java.util.Comparator.comparing;
-import static java.util.stream.Collectors.toList;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -124,6 +120,9 @@ void setup(@TempDir Path testDir) throws Exception {
     configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.toString());
     containerClient = mock(StorageContainerLocationProtocol.class);
     blockClient = mock(ScmBlockLocationProtocol.class);
+    InnerNode.Factory factory = InnerNodeImpl.FACTORY;
+    when(blockClient.getNetworkTopology()).thenReturn(
+        factory.newInnerNode("", "", null, NetConstants.ROOT_LEVEL, 1));
 
     OmTestManagers omTestManagers
         = new OmTestManagers(configuration, blockClient, containerClient);
@@ -644,9 +643,6 @@ public void listStatus() throws Exception {
     OMRequestTestUtils.addBucketToDB(volume, bucket, metadataManager);
 
     final Pipeline pipeline = MockPipeline.createPipeline(3);
-    final List<String> nodes = pipeline.getNodes().stream()
-        .map(DatanodeDetails::getUuidString)
-        .collect(toList());
 
     Set<Long> containerIDs = new HashSet<>();
     List<ContainerWithPipeline> containersWithPipeline = new ArrayList<>();
@@ -696,7 +692,6 @@
     assertEquals(10, fileStatusList.size());
     verify(containerClient).getContainerWithPipelineBatch(containerIDs);
-    verify(blockClient).sortDatanodes(nodes, client);
 
     // call list status the second time, and verify no more calls to
     // SCM.
@@ -704,67 +699,4 @@
         null, Long.MAX_VALUE, client);
     verify(containerClient, times(1)).getContainerWithPipelineBatch(anySet());
   }
-
-  @ParameterizedTest
-  @ValueSource(strings = {"anyhost", ""})
-  public void sortDatanodes(String client) throws Exception {
-    // GIVEN
-    int pipelineCount = 3;
-    int keysPerPipeline = 5;
-    OmKeyInfo[] keyInfos = new OmKeyInfo[pipelineCount * keysPerPipeline];
-    List<List<String>> expectedSortDatanodesInvocations = new ArrayList<>();
-    Map<Pipeline, List<DatanodeDetails>> expectedSortedNodes = new HashMap<>();
-    int ki = 0;
-    for (int p = 0; p < pipelineCount; p++) {
-      final Pipeline pipeline = MockPipeline.createPipeline(3);
-      final List<String> nodes = pipeline.getNodes().stream()
-          .map(DatanodeDetails::getUuidString)
-          .collect(toList());
-      expectedSortDatanodesInvocations.add(nodes);
-      final List<DatanodeDetails> sortedNodes = pipeline.getNodes().stream()
-          .sorted(comparing(DatanodeDetails::getUuidString))
-          .collect(toList());
-      expectedSortedNodes.put(pipeline, sortedNodes);
-
-      when(blockClient.sortDatanodes(nodes, client))
-          .thenReturn(sortedNodes);
-
-      for (int i = 1; i <= keysPerPipeline; i++) {
-        OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
-            .setBlockID(new BlockID(i, 1L))
-            .setPipeline(pipeline)
-            .setOffset(0)
-            .setLength(256000)
-            .build();
-
-        OmKeyInfo keyInfo = new OmKeyInfo.Builder()
-            .setOmKeyLocationInfos(Arrays.asList(
-                new OmKeyLocationInfoGroup(0, emptyList()),
-                new OmKeyLocationInfoGroup(1, singletonList(keyLocationInfo))))
-            .build();
-        keyInfos[ki++] = keyInfo;
-      }
-    }
-
-    // WHEN
-    keyManager.sortDatanodes(client, keyInfos);
-
-    // THEN
-    // verify all key info locations got updated
-    for (OmKeyInfo keyInfo : keyInfos) {
-      OmKeyLocationInfoGroup locations = keyInfo.getLatestVersionLocations();
-      assertNotNull(locations);
-      for (OmKeyLocationInfo locationInfo : locations.getLocationList()) {
-        Pipeline pipeline = locationInfo.getPipeline();
-        List<DatanodeDetails> expectedOrder = expectedSortedNodes.get(pipeline);
-        assertEquals(expectedOrder, pipeline.getNodesInOrder());
-      }
-    }
-
-    // expect one invocation per pipeline
-    for (List<String> nodes : expectedSortDatanodesInvocations) {
-      verify(blockClient).sortDatanodes(nodes, client);
-    }
-  }
-
 }
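
The assertions removed above covered the old flow in which the OM delegated
datanode sorting to SCM through ScmBlockLocationProtocol.sortDatanodes. With
HDDS-9343 the OM holds the cluster tree itself (via ScmTopologyClient and the
NetworkTopologyImpl built in start()), so sorting can happen locally and the
per-listStatus RPC goes away, which is why the mock verification and the
sortDatanodes unit test are dropped here. A rough sketch of the local-sorting
idea follows; it is not the KeyManagerImpl code from this patch, and the
sortByDistanceCost call is an assumption borrowed from the sorting path SCM
already uses.

import java.util.List;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.net.Node;

// Hypothetical helper: order replicas by network distance from the reader
// using the OM-local cluster map instead of an RPC to SCM.
final class LocalSortSketch {
  private LocalSortSketch() {
  }

  @SuppressWarnings("unchecked")
  static List<DatanodeDetails> sort(NetworkTopology clusterMap, Node reader,
      List<DatanodeDetails> nodes) {
    // sortByDistanceCost(reader, nodes, activeLen) is assumed here, not shown
    // in this patch excerpt.
    return (List<DatanodeDetails>) clusterMap.sortByDistanceCost(
        reader, nodes, nodes.size());
  }
}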

From a145dd5bfd80310ebe393c6886c059b9db7f8939 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila"
Date: Thu, 7 Mar 2024 06:13:45 +0100
Subject: [PATCH 107/108] HDDS-9343. (addendum) Shift sortDatanodes logic to OM (#5391)

---
 .../apache/hadoop/ozone/TestGetClusterTreeInformation.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java
index 463c8b5ae5d9..9becc8b2591c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java
@@ -55,10 +55,10 @@ public class TestGetClusterTreeInformation {
   public static void init() throws IOException,
       TimeoutException, InterruptedException {
     conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf)
-        .setNumDatanodes(numOfDatanodes)
+    cluster = MiniOzoneCluster.newHABuilder(conf)
         .setNumOfOzoneManagers(3)
         .setNumOfStorageContainerManagers(3)
+        .setNumDatanodes(numOfDatanodes)
         .build();
     cluster.waitForClusterToBeReady();
     scm = cluster.getStorageContainerManager();

From 7c8160fe2c31ea6a945e0c16c2c16d230d7a0fb2 Mon Sep 17 00:00:00 2001
From: Siyao Meng <50227127+smengcl@users.noreply.github.com>
Date: Thu, 7 Mar 2024 06:56:19 -0800
Subject: [PATCH 108/108] HDDS-10482. OMRequestTestUtils.createOmKeyInfo should set key modification time (#6343)

---
 .../java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java | 1 +
 .../org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java | 1 +
 2 files changed, 2 insertions(+)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index 15af3910e90f..1e2fb6a60a80 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -698,6 +698,7 @@ private void testGetExpiredOpenKeysExcludeMPUKeys(
             RatisReplicationConfig.getInstance(ONE),
             new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true))
           .setCreationTime(expiredOpenKeyCreationTime)
           .build();
+      assertThat(keyInfo.getModificationTime()).isPositive();
       final String uploadId = OMMultipartUploadUtils.getMultipartUploadId();
       final OmMultipartKeyInfo multipartKeyInfo = OMRequestTestUtils.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
index e85675e9b079..0ebd6946bd29 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java
@@ -556,6 +556,7 @@ public static OmKeyInfo.Builder createOmKeyInfo(String volumeName, String bucket
         .setObjectID(0L)
         .setUpdateID(0L)
         .setCreationTime(Time.now())
+        .setModificationTime(Time.now())
         .addOmKeyLocationInfoGroup(omKeyLocationInfoGroup)
         .setDataSize(1000L);
   }
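
With patch 108, every OmKeyInfo produced through
OMRequestTestUtils.createOmKeyInfo carries a modification time alongside its
creation time, so assertions like the one added to TestOmMetadataManager hold
without per-test setup. A minimal illustration (the createOmKeyInfo arguments
here are abbreviated and hypothetical; see the builder hunk above for the
fields actually set):

// Illustration only: with the builder now calling setModificationTime(Time.now()),
// a freshly built key reports a positive modification time.
OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName,
    keyName, replicationConfig).build();
assertThat(keyInfo.getModificationTime()).isPositive();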