diff --git a/docker/monitornode/dashboards/cryptosim-dashboard.json b/docker/monitornode/dashboards/cryptosim-dashboard.json index 288035db07..08647b78ab 100644 --- a/docker/monitornode/dashboards/cryptosim-dashboard.json +++ b/docker/monitornode/dashboards/cryptosim-dashboard.json @@ -436,7 +436,7 @@ "targets": [ { "editorMode": "code", - "expr": "rate(cryptosim_transactions_processed_total[5m])", + "expr": "rate(cryptosim_transactions_processed_total[$__rate_interval])", "legendFormat": "__auto", "range": true, "refId": "A" @@ -453,6 +453,782 @@ "x": 0, "y": 33 }, + "id": 277, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 34 + }, + "id": 279, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": 
"pebblecache_size_entries", + "legendFormat": "{{cache}}", + "range": true, + "refId": "A" + } + ], + "title": "Cache Entry Count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 34 + }, + "id": 278, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "pebblecache_size_bytes", + "legendFormat": "{{cache}}", + "range": true, + "refId": "A" + } + ], + "title": "Cache Size (bytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + 
"barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 106 + }, + "id": 280, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_hits_total[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Hits / Second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": 
"A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "locale" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 106 + }, + "id": 281, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_misses_total[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Misses / Second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 114 + }, + "id": 282, + "options": { + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "sum by (cache) (rate(pebblecache_hits_total[$__rate_interval]))\n/\nclamp_min(\n sum by (cache) (rate(pebblecache_hits_total[$__rate_interval]))\n +\n sum by (cache) (rate(pebblecache_misses_total[$__rate_interval])),\n 1e-10\n)\n* 100", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Hit Percentage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 114 + }, + "id": 285, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": 
"sum by (cache) (rate(pebblecache_miss_latency_seconds_sum[$__rate_interval]))\n/\nclamp_min(sum by (cache) (rate(pebblecache_miss_latency_seconds_count[$__rate_interval])), 1e-10)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Average Cache Miss Latency", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 122 + }, + "id": 284, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "histogram_quantile(0.5, sum by (cache, le) (rate(pebblecache_miss_latency_seconds_bucket[$__rate_interval])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Miss Latency (p50)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 122 + }, + "id": 283, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum by (cache, le) (rate(pebblecache_miss_latency_seconds_bucket[$__rate_interval])))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Miss Latency (p99)", + "type": "timeseries" + } + ], + "title": "Cache", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, "id": 9, "panels": [ { @@ -521,7 +1297,7 @@ "h": 8, "w": 12, "x": 0, - "y": 18 + "y": 194 }, "id": 6, "options": { @@ -651,7 +1427,7 @@ "h": 8, "w": 12, "x": 12, - "y": 18 + "y": 194 }, "id": 7, "options": { @@ -746,7 +1522,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3370 + "y": 202 }, "id": 10, "options": 
{ @@ -840,7 +1616,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3370 + "y": 202 }, "id": 11, "options": { @@ -879,7 +1655,7 @@ "h": 1, "w": 24, "x": 0, - "y": 34 + "y": 35 }, "id": 13, "panels": [ @@ -909,7 +1685,7 @@ "h": 16, "w": 12, "x": 0, - "y": 19 + "y": 6995 }, "id": 20, "options": { @@ -1012,7 +1788,7 @@ "h": 8, "w": 12, "x": 12, - "y": 19 + "y": 6995 }, "id": 19, "options": { @@ -1143,7 +1919,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3243 + "y": 7011 }, "id": 21, "options": { @@ -1238,7 +2014,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3291 + "y": 7019 }, "id": 22, "options": { @@ -1369,7 +2145,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3291 + "y": 7019 }, "id": 23, "options": { @@ -1500,7 +2276,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3299 + "y": 7027 }, "id": 24, "options": { @@ -1631,7 +2407,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3299 + "y": 7027 }, "id": 25, "options": { @@ -1762,7 +2538,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3307 + "y": 7035 }, "id": 26, "options": { @@ -1893,7 +2669,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3307 + "y": 7035 }, "id": 27, "options": { @@ -2024,7 +2800,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3315 + "y": 7043 }, "id": 28, "options": { @@ -2099,7 +2875,7 @@ "h": 1, "w": 24, "x": 0, - "y": 35 + "y": 36 }, "id": 8, "panels": [ @@ -2168,7 +2944,7 @@ "h": 8, "w": 12, "x": 0, - "y": 44 + "y": 6996 }, "id": 1, "options": { @@ -2267,7 +3043,7 @@ "h": 8, "w": 12, "x": 12, - "y": 44 + "y": 6996 }, "id": 18, "options": { @@ -2342,7 +3118,7 @@ "h": 1, "w": 24, "x": 0, - "y": 36 + "y": 37 }, "id": 12, "panels": [ @@ -2411,7 +3187,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7478 + "y": 17878 }, "id": 3, "options": { @@ -2510,7 +3286,7 @@ "h": 8, "w": 12, "x": 12, - "y": 7478 + "y": 17878 }, "id": 4, "options": { @@ -2585,9 +3361,9 @@ "h": 1, "w": 24, "x": 0, - "y": 37 + "y": 38 }, - "id": 277, + "id": 286, "panels": [ { "datasource": { @@ -2655,9 +3431,9 @@ "h": 8, "w": 12, "x": 0, - "y": 0 + "y": 151 }, - "id": 278, + "id": 287, "options": { "legend": { "calcs": [], @@ -2785,9 
+3561,9 @@ "h": 8, "w": 12, "x": 12, - "y": 0 + "y": 151 }, - "id": 279, + "id": 288, "options": { "legend": { "calcs": [], @@ -2884,9 +3660,9 @@ "h": 8, "w": 12, "x": 0, - "y": 8 + "y": 159 }, - "id": 280, + "id": 289, "options": { "legend": { "calcs": [], @@ -2978,9 +3754,9 @@ "h": 8, "w": 12, "x": 12, - "y": 8 + "y": 159 }, - "id": 281, + "id": 290, "options": { "legend": { "calcs": [], @@ -3021,7 +3797,7 @@ "h": 1, "w": 24, "x": 0, - "y": 38 + "y": 39 }, "id": 29, "panels": [ @@ -3091,7 +3867,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3382 + "y": 13782 }, "id": 31, "options": { @@ -3186,7 +3962,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3382 + "y": 13782 }, "id": 36, "options": { @@ -3281,7 +4057,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3390 + "y": 13790 }, "id": 38, "options": { @@ -3320,7 +4096,7 @@ "h": 1, "w": 24, "x": 0, - "y": 39 + "y": 40 }, "id": 35, "panels": [ @@ -3389,7 +4165,7 @@ "h": 8, "w": 12, "x": 0, - "y": 8089 + "y": 18489 }, "id": 30, "options": { @@ -3484,7 +4260,7 @@ "h": 8, "w": 12, "x": 12, - "y": 8089 + "y": 18489 }, "id": 33, "options": { @@ -3579,7 +4355,7 @@ "h": 8, "w": 12, "x": 0, - "y": 8129 + "y": 18529 }, "id": 34, "options": { @@ -3618,7 +4394,7 @@ "h": 1, "w": 24, "x": 0, - "y": 40 + "y": 41 }, "id": 37, "panels": [ @@ -3688,7 +4464,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7482 + "y": 42 }, "id": 39, "options": { @@ -3783,7 +4559,7 @@ "h": 8, "w": 12, "x": 12, - "y": 7482 + "y": 42 }, "id": 40, "options": { @@ -3878,7 +4654,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7490 + "y": 50 }, "id": 41, "options": { @@ -3973,7 +4749,7 @@ "h": 8, "w": 12, "x": 12, - "y": 7490 + "y": 50 }, "id": 42, "options": { @@ -4068,7 +4844,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7498 + "y": 58 }, "id": 32, "options": { @@ -4096,6 +4872,101 @@ ], "title": "Open File Descriptors", "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 58 + }, + "id": 291, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.4.0", + "targets": [ + { + "editorMode": "code", + "expr": "cryptosim_log_dir_size_bytes", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Log Files Size", + "type": "timeseries" } ], "title": "File System", @@ -4107,7 +4978,7 @@ "h": 1, "w": 24, "x": 0, - "y": 41 + "y": 42 }, "id": 44, "panels": [ @@ -4177,7 +5048,7 @@ "h": 8, "w": 12, "x": 0, - "y": 7283 + "y": 17683 }, "id": 43, "options": { @@ -4216,7 +5087,7 @@ "h": 1, "w": 24, "x": 0, - "y": 42 + "y": 43 }, "id": 117, "panels": [ @@ -4476,7 +5347,7 @@ "h": 8, "w": 12, "x": 0, - "y": 218 + "y": 10618 }, "id": 261, "options": { @@ -4571,7 +5442,7 @@ "h": 8, "w": 12, "x": 12, - "y": 218 + "y": 10618 }, "id": 263, "options": { @@ -4666,7 +5537,7 @@ "h": 8, "w": 12, "x": 0, - "y": 226 + "y": 10626 }, "id": 262, "options": { @@ -4761,7 +5632,7 @@ "h": 8, "w": 12, 
"x": 12, - "y": 226 + "y": 10626 }, "id": 264, "options": { @@ -4800,7 +5671,7 @@ "h": 1, "w": 24, "x": 0, - "y": 43 + "y": 44 }, "id": 191, "panels": [ @@ -4869,7 +5740,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3403 + "y": 13803 }, "id": 155, "options": { @@ -4964,7 +5835,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3403 + "y": 13803 }, "id": 111, "options": { @@ -5059,7 +5930,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3467 + "y": 13867 }, "id": 175, "options": { @@ -5154,7 +6025,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3467 + "y": 13867 }, "id": 173, "options": { @@ -5248,7 +6119,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3475 + "y": 13875 }, "id": 138, "options": { @@ -5343,7 +6214,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3475 + "y": 13875 }, "id": 172, "options": { @@ -5438,7 +6309,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3483 + "y": 13883 }, "id": 236, "options": { @@ -5477,7 +6348,7 @@ "h": 1, "w": 24, "x": 0, - "y": 44 + "y": 45 }, "id": 118, "panels": [ @@ -5546,7 +6417,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1556 + "y": 11956 }, "id": 127, "options": { @@ -5641,7 +6512,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1556 + "y": 11956 }, "id": 120, "options": { @@ -5735,7 +6606,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1564 + "y": 11964 }, "id": 128, "options": { @@ -5830,7 +6701,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1564 + "y": 11964 }, "id": 121, "options": { @@ -5924,7 +6795,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1572 + "y": 11972 }, "id": 129, "options": { @@ -6019,7 +6890,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1572 + "y": 11972 }, "id": 122, "options": { @@ -6113,7 +6984,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1580 + "y": 11980 }, "id": 130, "options": { @@ -6208,7 +7079,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1580 + "y": 11980 }, "id": 123, "options": { @@ -6302,7 +7173,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1588 + "y": 11988 }, "id": 131, "options": { @@ -6397,7 +7268,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1588 + "y": 11988 }, "id": 124, "options": { @@ -6491,7 +7362,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1596 + 
"y": 11996 }, "id": 132, "options": { @@ -6586,7 +7457,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1596 + "y": 11996 }, "id": 125, "options": { @@ -6680,7 +7551,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1604 + "y": 12004 }, "id": 119, "options": { @@ -6775,7 +7646,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1604 + "y": 12004 }, "id": 126, "options": { @@ -6814,7 +7685,7 @@ "h": 1, "w": 24, "x": 0, - "y": 45 + "y": 46 }, "id": 115, "panels": [ @@ -6883,7 +7754,7 @@ "h": 8, "w": 12, "x": 0, - "y": 53 + "y": 10381 }, "id": 101, "options": { @@ -6977,7 +7848,7 @@ "h": 8, "w": 12, "x": 12, - "y": 53 + "y": 10381 }, "id": 187, "options": { @@ -7072,7 +7943,7 @@ "h": 8, "w": 12, "x": 0, - "y": 61 + "y": 10389 }, "id": 113, "options": { @@ -7166,7 +8037,7 @@ "h": 8, "w": 12, "x": 12, - "y": 61 + "y": 10389 }, "id": 103, "options": { @@ -7261,7 +8132,7 @@ "h": 8, "w": 12, "x": 0, - "y": 69 + "y": 10397 }, "id": 102, "options": { @@ -7356,7 +8227,7 @@ "h": 8, "w": 12, "x": 12, - "y": 69 + "y": 10397 }, "id": 116, "options": { @@ -7450,7 +8321,7 @@ "h": 8, "w": 12, "x": 0, - "y": 77 + "y": 10405 }, "id": 135, "options": { @@ -7545,7 +8416,7 @@ "h": 8, "w": 12, "x": 12, - "y": 77 + "y": 10405 }, "id": 134, "options": { @@ -7639,7 +8510,7 @@ "h": 8, "w": 12, "x": 0, - "y": 85 + "y": 10413 }, "id": 136, "options": { @@ -7734,7 +8605,7 @@ "h": 8, "w": 12, "x": 12, - "y": 85 + "y": 10413 }, "id": 159, "options": { @@ -7773,7 +8644,7 @@ "h": 1, "w": 24, "x": 0, - "y": 46 + "y": 47 }, "id": 193, "panels": [ @@ -7842,7 +8713,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5130 + "y": 15530 }, "id": 141, "options": { @@ -7936,7 +8807,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5130 + "y": 15530 }, "id": 148, "options": { @@ -8030,7 +8901,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5138 + "y": 15538 }, "id": 142, "options": { @@ -8124,7 +8995,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5138 + "y": 15538 }, "id": 149, "options": { @@ -8218,7 +9089,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5146 + "y": 15546 }, "id": 143, "options": { @@ 
-8312,7 +9183,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5146 + "y": 15546 }, "id": 150, "options": { @@ -8406,7 +9277,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5154 + "y": 15554 }, "id": 144, "options": { @@ -8500,7 +9371,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5154 + "y": 15554 }, "id": 151, "options": { @@ -8594,7 +9465,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5162 + "y": 15562 }, "id": 145, "options": { @@ -8688,7 +9559,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5162 + "y": 15562 }, "id": 152, "options": { @@ -8782,7 +9653,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5170 + "y": 15570 }, "id": 146, "options": { @@ -8876,7 +9747,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5170 + "y": 15570 }, "id": 153, "options": { @@ -8970,7 +9841,7 @@ "h": 8, "w": 12, "x": 0, - "y": 5178 + "y": 15578 }, "id": 147, "options": { @@ -9064,7 +9935,7 @@ "h": 8, "w": 12, "x": 12, - "y": 5178 + "y": 15578 }, "id": 154, "options": { @@ -9103,7 +9974,7 @@ "h": 1, "w": 24, "x": 0, - "y": 47 + "y": 48 }, "id": 192, "panels": [ @@ -9172,7 +10043,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4906 + "y": 15306 }, "id": 190, "options": { @@ -9267,7 +10138,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4906 + "y": 15306 }, "id": 184, "options": { @@ -9361,7 +10232,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4914 + "y": 15314 }, "id": 188, "options": { @@ -9456,7 +10327,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4914 + "y": 15314 }, "id": 186, "options": { @@ -9551,7 +10422,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4922 + "y": 15322 }, "id": 185, "options": { @@ -9645,7 +10516,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4922 + "y": 15322 }, "id": 189, "options": { @@ -9739,7 +10610,7 @@ "h": 8, "w": 12, "x": 0, - "y": 4930 + "y": 15330 }, "id": 181, "options": { @@ -9834,7 +10705,7 @@ "h": 8, "w": 12, "x": 12, - "y": 4930 + "y": 15330 }, "id": 182, "options": { @@ -9873,7 +10744,7 @@ "h": 1, "w": 24, "x": 0, - "y": 48 + "y": 49 }, "id": 194, "panels": [ @@ -9942,7 +10813,7 @@ "h": 8, "w": 12, "x": 0, - "y": 856 + "y": 11256 }, "id": 170, "options": { @@ -10037,7 
+10908,7 @@ "h": 8, "w": 12, "x": 12, - "y": 856 + "y": 11256 }, "id": 171, "options": { @@ -10131,7 +11002,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1256 + "y": 11656 }, "id": 162, "options": { @@ -10226,7 +11097,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1256 + "y": 11656 }, "id": 108, "options": { @@ -10321,7 +11192,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1264 + "y": 11664 }, "id": 169, "options": { @@ -10415,7 +11286,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1264 + "y": 11664 }, "id": 166, "options": { @@ -10509,7 +11380,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1272 + "y": 11672 }, "id": 157, "options": { @@ -10604,7 +11475,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1272 + "y": 11672 }, "id": 158, "options": { @@ -10698,7 +11569,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1280 + "y": 11680 }, "id": 167, "options": { @@ -10793,7 +11664,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1280 + "y": 11680 }, "id": 168, "options": { @@ -10887,7 +11758,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1288 + "y": 11688 }, "id": 137, "options": { @@ -10982,7 +11853,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1288 + "y": 11688 }, "id": 183, "options": { @@ -11077,7 +11948,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1296 + "y": 11696 }, "id": 241, "options": { @@ -11172,7 +12043,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1296 + "y": 11696 }, "id": 242, "options": { @@ -11267,7 +12138,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1304 + "y": 11704 }, "id": 243, "options": { @@ -11362,7 +12233,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1304 + "y": 11704 }, "id": 244, "options": { @@ -11457,7 +12328,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1312 + "y": 11712 }, "id": 245, "options": { @@ -11552,7 +12423,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1312 + "y": 11712 }, "id": 246, "options": { @@ -11647,7 +12518,7 @@ "h": 8, "w": 12, "x": 0, - "y": 1320 + "y": 11720 }, "id": 247, "options": { @@ -11742,7 +12613,7 @@ "h": 8, "w": 12, "x": 12, - "y": 1320 + "y": 11720 }, "id": 248, "options": { @@ -11781,7 +12652,7 @@ "h": 1, "w": 24, "x": 0, - "y": 49 + "y": 50 }, "id": 195, 
"panels": [ @@ -11850,7 +12721,7 @@ "h": 8, "w": 12, "x": 0, - "y": 81 + "y": 10481 }, "id": 161, "options": { @@ -11944,7 +12815,7 @@ "h": 8, "w": 12, "x": 12, - "y": 81 + "y": 10481 }, "id": 104, "options": { @@ -12039,7 +12910,7 @@ "h": 8, "w": 12, "x": 0, - "y": 89 + "y": 10489 }, "id": 105, "options": { @@ -12133,7 +13004,7 @@ "h": 8, "w": 12, "x": 12, - "y": 89 + "y": 10489 }, "id": 164, "options": { @@ -12227,7 +13098,7 @@ "h": 8, "w": 12, "x": 0, - "y": 97 + "y": 10497 }, "id": 163, "options": { @@ -12322,7 +13193,7 @@ "h": 8, "w": 12, "x": 12, - "y": 97 + "y": 10497 }, "id": 165, "options": { @@ -12417,7 +13288,7 @@ "h": 8, "w": 12, "x": 0, - "y": 105 + "y": 10505 }, "id": 223, "options": { @@ -12456,7 +13327,7 @@ "h": 1, "w": 24, "x": 0, - "y": 50 + "y": 51 }, "id": 210, "panels": [ @@ -12526,7 +13397,7 @@ "h": 8, "w": 12, "x": 0, - "y": 858 + "y": 11258 }, "id": 211, "options": { @@ -12621,7 +13492,7 @@ "h": 8, "w": 12, "x": 12, - "y": 858 + "y": 11258 }, "id": 212, "options": { @@ -12716,7 +13587,7 @@ "h": 8, "w": 12, "x": 0, - "y": 938 + "y": 11338 }, "id": 213, "options": { @@ -12811,7 +13682,7 @@ "h": 8, "w": 12, "x": 12, - "y": 938 + "y": 11338 }, "id": 214, "options": { @@ -12906,7 +13777,7 @@ "h": 8, "w": 12, "x": 0, - "y": 946 + "y": 11346 }, "id": 215, "options": { @@ -13001,7 +13872,7 @@ "h": 8, "w": 12, "x": 12, - "y": 946 + "y": 11346 }, "id": 216, "options": { @@ -13096,7 +13967,7 @@ "h": 8, "w": 12, "x": 0, - "y": 954 + "y": 11354 }, "id": 217, "options": { @@ -13191,7 +14062,7 @@ "h": 8, "w": 12, "x": 12, - "y": 954 + "y": 11354 }, "id": 218, "options": { @@ -13230,7 +14101,7 @@ "h": 1, "w": 24, "x": 0, - "y": 51 + "y": 52 }, "id": 230, "panels": [ @@ -13300,7 +14171,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3700 + "y": 14100 }, "id": 231, "options": { @@ -13394,7 +14265,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3700 + "y": 14100 }, "id": 178, "options": { @@ -13489,7 +14360,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3708 + "y": 14108 }, "id": 179, 
"options": { @@ -13583,7 +14454,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3708 + "y": 14108 }, "id": 156, "options": { @@ -13622,7 +14493,7 @@ "h": 1, "w": 24, "x": 0, - "y": 52 + "y": 53 }, "id": 250, "panels": [ @@ -13692,7 +14563,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3492 + "y": 13892 }, "id": 251, "options": { @@ -13787,7 +14658,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3492 + "y": 13892 }, "id": 252, "options": { @@ -13882,7 +14753,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3604 + "y": 14004 }, "id": 253, "options": { @@ -13977,7 +14848,7 @@ "h": 8, "w": 12, "x": 12, - "y": 3604 + "y": 14004 }, "id": 254, "options": { @@ -14072,7 +14943,7 @@ "h": 8, "w": 12, "x": 0, - "y": 3612 + "y": 14012 }, "id": 273, "options": { @@ -14111,7 +14982,7 @@ "h": 1, "w": 24, "x": 0, - "y": 53 + "y": 54 }, "id": 100, "panels": [ @@ -14180,7 +15051,7 @@ "h": 8, "w": 12, "x": 0, - "y": 861 + "y": 11261 }, "id": 107, "options": { @@ -14275,7 +15146,7 @@ "h": 8, "w": 12, "x": 12, - "y": 861 + "y": 11261 }, "id": 110, "options": { @@ -14370,7 +15241,7 @@ "h": 8, "w": 12, "x": 0, - "y": 869 + "y": 11269 }, "id": 180, "options": { @@ -14464,7 +15335,7 @@ "h": 8, "w": 12, "x": 12, - "y": 869 + "y": 11269 }, "id": 160, "options": { @@ -14558,7 +15429,7 @@ "h": 8, "w": 12, "x": 0, - "y": 877 + "y": 11277 }, "id": 139, "options": { @@ -14652,7 +15523,7 @@ "h": 8, "w": 12, "x": 12, - "y": 877 + "y": 11277 }, "id": 176, "options": { @@ -14746,7 +15617,7 @@ "h": 8, "w": 12, "x": 0, - "y": 885 + "y": 11285 }, "id": 133, "options": { @@ -14841,7 +15712,7 @@ "h": 8, "w": 12, "x": 12, - "y": 885 + "y": 11285 }, "id": 221, "options": { @@ -14935,7 +15806,7 @@ "h": 8, "w": 12, "x": 0, - "y": 893 + "y": 11293 }, "id": 177, "options": { @@ -15030,7 +15901,7 @@ "h": 8, "w": 12, "x": 12, - "y": 893 + "y": 11293 }, "id": 271, "options": { @@ -15125,7 +15996,7 @@ "h": 8, "w": 12, "x": 0, - "y": 901 + "y": 11301 }, "id": 274, "options": { @@ -15220,7 +16091,7 @@ "h": 8, "w": 12, "x": 12, - "y": 901 + "y": 11301 }, "id": 
272, "options": { @@ -15315,7 +16186,7 @@ "h": 8, "w": 12, "x": 0, - "y": 909 + "y": 11309 }, "id": 232, "options": { @@ -15410,7 +16281,7 @@ "h": 8, "w": 12, "x": 12, - "y": 909 + "y": 11309 }, "id": 233, "options": { @@ -15505,7 +16376,7 @@ "h": 8, "w": 12, "x": 0, - "y": 917 + "y": 11317 }, "id": 234, "options": { @@ -15600,7 +16471,7 @@ "h": 8, "w": 12, "x": 12, - "y": 917 + "y": 11317 }, "id": 235, "options": { @@ -15695,7 +16566,7 @@ "h": 8, "w": 12, "x": 0, - "y": 925 + "y": 11325 }, "id": 222, "options": { @@ -15744,6 +16615,6 @@ "timezone": "browser", "title": "CryptoSim", "uid": "adnqfm4", - "version": 30, + "version": 11, "weekStart": "" -} +} \ No newline at end of file diff --git a/sei-db/common/metrics/phase_timer.go b/sei-db/common/metrics/phase_timer.go index 06700257db..3a4c5eaadb 100644 --- a/sei-db/common/metrics/phase_timer.go +++ b/sei-db/common/metrics/phase_timer.go @@ -30,6 +30,7 @@ func NewPhaseTimerFactory(meter metric.Meter, timerName string) *PhaseTimerFacto timerName+"_phase_latency_seconds", metric.WithDescription("Latency per phase (seconds); use for p99, p95, etc."), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(LatencyBuckets...), ) return &PhaseTimerFactory{ phaseDurationTotal: phaseDurationTotal, diff --git a/sei-db/common/threading/fixed_pool.go b/sei-db/common/threading/fixed_pool.go index 2702741ce4..2e393ef5c3 100644 --- a/sei-db/common/threading/fixed_pool.go +++ b/sei-db/common/threading/fixed_pool.go @@ -26,6 +26,10 @@ func NewFixedPool( queueSize int, ) Pool { + if workers <= 0 { + workers = 1 + } + workQueue := make(chan func(), queueSize) fp := &fixedPool{ workQueue: workQueue, diff --git a/sei-db/config/sc_config.go b/sei-db/config/sc_config.go index 830cc7ab48..fc1de69c65 100644 --- a/sei-db/config/sc_config.go +++ b/sei-db/config/sc_config.go @@ -67,7 +67,7 @@ func DefaultStateCommitConfig() StateCommitConfig { ReadMode: CosmosOnlyRead, EnableLatticeHash: false, MemIAVLConfig: memiavl.DefaultConfig(), - 
FlatKVConfig: flatkv.DefaultConfig(), + FlatKVConfig: *flatkv.DefaultConfig(), HistoricalProofMaxInFlight: DefaultSCHistoricalProofMaxInFlight, HistoricalProofRateLimit: DefaultSCHistoricalProofRateLimit, diff --git a/sei-db/db_engine/dbcache/cache.go b/sei-db/db_engine/dbcache/cache.go index ccbaf6464c..604cd4d7d7 100644 --- a/sei-db/db_engine/dbcache/cache.go +++ b/sei-db/db_engine/dbcache/cache.go @@ -3,7 +3,6 @@ package dbcache import ( "context" "fmt" - "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -93,32 +92,18 @@ func (u *CacheUpdate) IsDelete() bool { return u.Value == nil } -// BuildCache creates a new Cache. +// BuildCache creates a new Cache. When cfg.MaxSize is 0 a no-op (passthrough) cache is returned. func BuildCache( ctx context.Context, - shardCount uint64, - maxSize uint64, + cfg *CacheConfig, readPool threading.Pool, miscPool threading.Pool, - estimatedOverheadPerEntry uint64, - cacheName string, - metricsScrapeInterval time.Duration, ) (Cache, error) { - - if maxSize == 0 { + if cfg.MaxSize == 0 { return NewNoOpCache(), nil } - cache, err := NewStandardCache( - ctx, - shardCount, - maxSize, - readPool, - miscPool, - estimatedOverheadPerEntry, - cacheName, - metricsScrapeInterval, - ) + cache, err := NewStandardCache(ctx, cfg, readPool, miscPool) if err != nil { return nil, fmt.Errorf("failed to create cache: %w", err) } diff --git a/sei-db/db_engine/dbcache/cache_config.go b/sei-db/db_engine/dbcache/cache_config.go new file mode 100644 index 0000000000..703653fab7 --- /dev/null +++ b/sei-db/db_engine/dbcache/cache_config.go @@ -0,0 +1,43 @@ +package dbcache + +import ( + "fmt" + "time" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" +) + +// CacheConfig defines configuration for a sharded LRU read-through cache. +type CacheConfig struct { + // The number of shards in the cache. Must be a power of two and greater than 0. 
+ ShardCount uint64 + // The maximum size of the cache, in bytes. 0 disables the cache. + MaxSize uint64 + // The estimated overhead per entry, in bytes. Used to calculate effective cache + // capacity. Derive experimentally; may differ between builds and architectures. + EstimatedOverheadPerEntry uint64 + // Name used as the "cache" attribute on OTel metrics. Empty string disables metrics. + MetricsName string + // How often to scrape cache size for metrics. Ignored if MetricsName is empty. + MetricsScrapeInterval time.Duration +} + +// DefaultCacheConfig returns a CacheConfig with sensible defaults. +func DefaultCacheConfig() CacheConfig { + return CacheConfig{ + ShardCount: 8, + MaxSize: 512 * unit.MB, + EstimatedOverheadPerEntry: DefaultEstimatedOverheadPerEntry, + } +} + +// Validate checks that the configuration is sane and returns an error if it is not. +func (c *CacheConfig) Validate() error { + if c.MaxSize > 0 && (c.ShardCount == 0 || (c.ShardCount&(c.ShardCount-1)) != 0) { + return fmt.Errorf("shard count must be a non-zero power of two") + } + if c.MetricsName != "" && c.MetricsScrapeInterval <= 0 { + return fmt.Errorf("metrics scrape interval must be positive when metrics name is set") + } + return nil +} diff --git a/sei-db/db_engine/dbcache/cache_impl.go b/sei-db/db_engine/dbcache/cache_impl.go index 1292f74caf..8dc5704f50 100644 --- a/sei-db/db_engine/dbcache/cache_impl.go +++ b/sei-db/db_engine/dbcache/cache_impl.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "sync" - "time" "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" @@ -29,45 +28,33 @@ type cache struct { miscPool threading.Pool } -// Creates a new Cache. If cacheName is non-empty, OTel metrics are enabled and the -// background size scrape runs every metricsScrapeInterval. +// Creates a new Cache. 
If cfg.MetricsName is non-empty, OTel metrics are enabled and the +// background size scrape runs every cfg.MetricsScrapeInterval. func NewStandardCache( ctx context.Context, - // The number of shards in the cache. Must be a power of two and greater than 0. - shardCount uint64, - // The maximum size of the cache, in bytes. - maxSize uint64, - // A work pool for reading from the DB. + cfg *CacheConfig, readPool threading.Pool, - // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. miscPool threading.Pool, - // The estimated overhead per entry, in bytes. This is used to calculate the maximum size of the cache. - // This value should be derived experimentally, and may differ between different builds and architectures. - estimatedOverheadPerEntry uint64, - // Name used as the "cache" attribute on metrics. Empty string disables metrics. - cacheName string, - // How often to scrape cache size for metrics. Ignored if cacheName is empty. - metricsScrapeInterval time.Duration, ) (Cache, error) { - if shardCount == 0 || (shardCount&(shardCount-1)) != 0 { + if cfg.ShardCount == 0 || (cfg.ShardCount&(cfg.ShardCount-1)) != 0 { return nil, ErrNumShardsNotPowerOfTwo } - if maxSize == 0 { + if cfg.MaxSize == 0 { return nil, fmt.Errorf("maxSize must be greater than 0") } - shardManager, err := newShardManager(shardCount) + shardManager, err := newShardManager(cfg.ShardCount) if err != nil { return nil, fmt.Errorf("failed to create shard manager: %w", err) } - sizePerShard := maxSize / shardCount + sizePerShard := cfg.MaxSize / cfg.ShardCount if sizePerShard == 0 { return nil, fmt.Errorf("maxSize must be greater than shardCount") } - shards := make([]*shard, shardCount) - for i := uint64(0); i < shardCount; i++ { - shards[i], err = NewShard(ctx, readPool, sizePerShard, estimatedOverheadPerEntry) + shards := make([]*shard, cfg.ShardCount) + for i := uint64(0); i < cfg.ShardCount; i++ { + shards[i], err = NewShard(ctx, readPool, 
sizePerShard, cfg.EstimatedOverheadPerEntry) if err != nil { return nil, fmt.Errorf("failed to create shard: %w", err) } @@ -81,8 +68,8 @@ func NewStandardCache( miscPool: miscPool, } - if cacheName != "" { - metrics := newCacheMetrics(ctx, cacheName, metricsScrapeInterval, c.getCacheSizeInfo) + if cfg.MetricsName != "" { + metrics := newCacheMetrics(ctx, cfg.MetricsName, cfg.MetricsScrapeInterval, c.getCacheSizeInfo) for _, s := range c.shards { s.metrics = metrics } diff --git a/sei-db/db_engine/dbcache/cache_impl_test.go b/sei-db/db_engine/dbcache/cache_impl_test.go index 5433019c93..4c44d5283a 100644 --- a/sei-db/db_engine/dbcache/cache_impl_test.go +++ b/sei-db/db_engine/dbcache/cache_impl_test.go @@ -31,7 +31,9 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin return v, true, nil } pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), shardCount, maxSize, pool, pool, 16, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: shardCount, MaxSize: maxSize, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.NoError(t, err) return c, read } @@ -42,42 +44,54 @@ func newTestCache(t *testing.T, store map[string][]byte, shardCount, maxSize uin func TestNewStandardCacheValid(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 4, 1024, pool, pool, 16, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 4, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.NoError(t, err) require.NotNil(t, c) } func TestNewStandardCacheSingleShard(t *testing.T) { pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 1, 1024, pool, pool, 16, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 1, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.NoError(t, err) require.NotNil(t, c) } func 
TestNewStandardCacheShardCountZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), 0, 1024, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 0, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err) } func TestNewStandardCacheShardCountNotPowerOfTwo(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{3, 5, 6, 7, 9, 10} { - _, err := NewStandardCache(context.Background(), n, 1024, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: n, MaxSize: 1024, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err, "shardCount=%d", n) } } func TestNewStandardCacheMaxSizeZero(t *testing.T) { pool := threading.NewAdHocPool() - _, err := NewStandardCache(context.Background(), 4, 0, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 4, MaxSize: 0, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err) } func TestNewStandardCacheMaxSizeLessThanShardCount(t *testing.T) { pool := threading.NewAdHocPool() // shardCount=4, maxSize=3 → sizePerShard=0 - _, err := NewStandardCache(context.Background(), 4, 3, pool, pool, 16, "", 0) + _, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 4, MaxSize: 3, EstimatedOverheadPerEntry: 16, + }, pool, pool) require.Error(t, err) } @@ -85,7 +99,9 @@ func TestNewStandardCacheWithMetrics(t *testing.T) { pool := threading.NewAdHocPool() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - c, err := NewStandardCache(ctx, 2, 1024, pool, pool, 0, "test-cache", time.Hour) + c, err := NewStandardCache(ctx, &CacheConfig{ + ShardCount: 2, MaxSize: 1024, MetricsName: "test-cache", MetricsScrapeInterval: time.Hour, + }, pool, pool) require.NoError(t, err) require.NotNil(t, c) } @@ -144,7 +160,7 @@ func TestCacheGetDBError(t *testing.T) 
{ dbErr := errors.New("db fail") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, pool, pool) _, _, err := c.Get(readFunc, []byte("k"), true) require.Error(t, err) @@ -158,7 +174,7 @@ func TestCacheGetSameKeyConsistentShard(t *testing.T) { return []byte("val"), true, nil } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 4, 4096, pool, pool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 4, MaxSize: 4096}, pool, pool) val1, _, _ := c.Get(readFunc, []byte("key"), true) val2, _, _ := c.Get(readFunc, []byte("key"), true) @@ -327,7 +343,7 @@ func TestCacheBatchSetEmpty(t *testing.T) { func TestCacheBatchSetPoolFailure(t *testing.T) { readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, readPool, &failPool{}, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, readPool, &failPool{}) err := c.BatchSet([]CacheUpdate{ {Key: []byte("k"), Value: []byte("v")}, @@ -405,7 +421,7 @@ func TestCacheBatchGetDBError(t *testing.T) { dbErr := errors.New("broken") readFunc := func(key []byte) ([]byte, bool, error) { return nil, false, dbErr } pool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, pool, pool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, pool, pool) keys := map[string]types.BatchGetResult{"fail": {}} require.NoError(t, c.BatchGet(readFunc, keys), "BatchGet itself should not fail") @@ -420,7 +436,7 @@ func TestCacheBatchGetEmpty(t *testing.T) { func TestCacheBatchGetPoolFailure(t *testing.T) { readPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, 
readPool, &failPool{}, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, readPool, &failPool{}) keys := map[string]types.BatchGetResult{"k": {}} err := c.BatchGet(noopRead, keys) @@ -429,7 +445,7 @@ func TestCacheBatchGetPoolFailure(t *testing.T) { func TestCacheBatchGetShardReadPoolFailure(t *testing.T) { miscPool := threading.NewAdHocPool() - c, _ := NewStandardCache(context.Background(), 1, 4096, &failPool{}, miscPool, 0, "", 0) + c, _ := NewStandardCache(context.Background(), &CacheConfig{ShardCount: 1, MaxSize: 4096}, &failPool{}, miscPool) keys := map[string]types.BatchGetResult{"a": {}, "b": {}} require.NoError(t, c.BatchGet(noopRead, keys)) @@ -505,7 +521,9 @@ func TestCacheGetCacheSizeInfoAggregatesShards(t *testing.T) { func TestCacheSizeInfoIncludesOverhead(t *testing.T) { const overhead = 200 pool := threading.NewAdHocPool() - c, err := NewStandardCache(context.Background(), 1, 100_000, pool, pool, overhead, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 1, MaxSize: 100_000, EstimatedOverheadPerEntry: overhead, + }, pool, pool) require.NoError(t, err) impl := c.(*cache) @@ -523,7 +541,9 @@ func TestCacheOverheadCausesEarlierEviction(t *testing.T) { pool := threading.NewAdHocPool() // Single shard, maxSize=500. Each 10-byte value entry costs 1+10+200=211 bytes. // Two entries = 422 < 500. Three entries = 633 > 500, so one must be evicted. 
- c, err := NewStandardCache(context.Background(), 1, 500, pool, pool, overhead, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ + ShardCount: 1, MaxSize: 500, EstimatedOverheadPerEntry: overhead, + }, pool, pool) require.NoError(t, err) impl := c.(*cache) @@ -731,7 +751,7 @@ func TestCacheBatchGetAfterBatchSetWithDeletes(t *testing.T) { func TestNewStandardCachePowerOfTwoShardCounts(t *testing.T) { pool := threading.NewAdHocPool() for _, n := range []uint64{1, 2, 4, 8, 16, 32, 64} { - c, err := NewStandardCache(context.Background(), n, n*100, pool, pool, 0, "", 0) + c, err := NewStandardCache(context.Background(), &CacheConfig{ShardCount: n, MaxSize: n * 100}, pool, pool) require.NoError(t, err, "shardCount=%d", n) require.NotNil(t, c, "shardCount=%d", n) } diff --git a/sei-db/db_engine/pebbledb/batch.go b/sei-db/db_engine/pebbledb/batch.go index 032bc3fb5d..1ad8d0f4e1 100644 --- a/sei-db/db_engine/pebbledb/batch.go +++ b/sei-db/db_engine/pebbledb/batch.go @@ -1,6 +1,8 @@ package pebbledb import ( + "fmt" + "github.com/cockroachdb/pebble/v2" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) @@ -14,26 +16,24 @@ type pebbleBatch struct { var _ types.Batch = (*pebbleBatch)(nil) -func newPebbleBatch(db *pebble.DB) *pebbleBatch { - return &pebbleBatch{b: db.NewBatch()} -} - func (p *pebbleDB) NewBatch() types.Batch { - return newPebbleBatch(p.db) + return &pebbleBatch{b: p.db.NewBatch()} } func (pb *pebbleBatch) Set(key, value []byte) error { - // Durability options are applied on Commit. return pb.b.Set(key, value, nil) } func (pb *pebbleBatch) Delete(key []byte) error { - // Durability options are applied on Commit. 
return pb.b.Delete(key, nil) } func (pb *pebbleBatch) Commit(opts types.WriteOptions) error { - return pb.b.Commit(toPebbleWriteOpts(opts)) + err := pb.b.Commit(toPebbleWriteOpts(opts)) + if err != nil { + return fmt.Errorf("failed to commit batch: %w", err) + } + return nil } func (pb *pebbleBatch) Len() int { diff --git a/sei-db/db_engine/pebbledb/db.go b/sei-db/db_engine/pebbledb/db.go index e2f0000ecd..623abcceb6 100644 --- a/sei-db/db_engine/pebbledb/db.go +++ b/sei-db/db_engine/pebbledb/db.go @@ -6,19 +6,18 @@ import ( "errors" "fmt" "path/filepath" - "time" "github.com/cockroachdb/pebble/v2" "github.com/cockroachdb/pebble/v2/bloom" "github.com/cockroachdb/pebble/v2/sstable" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" - "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -const metricsScrapeInterval = 10 * time.Second - // pebbleDB implements the db_engine.DB interface using PebbleDB. type pebbleDB struct { db *pebble.DB @@ -27,29 +26,22 @@ type pebbleDB struct { var _ types.KeyValueDB = (*pebbleDB)(nil) -// Open opens (or creates) a Pebble-backed DB at path, returning the DB interface. 
+// Open opens (or creates) a Pebble-backed DB at path, returning a KeyValueDB func Open( ctx context.Context, - path string, - opts types.OpenOptions, - enableMetrics bool, + config *PebbleDBConfig, ) (_ types.KeyValueDB, err error) { - // Validate options before allocating resources to avoid leaks on validation failure - var cmp *pebble.Comparer - if opts.Comparer != nil { - var ok bool - cmp, ok = opts.Comparer.(*pebble.Comparer) - if !ok { - return nil, fmt.Errorf("OpenOptions.Comparer must be *pebble.Comparer, got %T", opts.Comparer) - } + + if err := config.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate config: %w", err) } - cache := pebble.NewCache(1024 * 1024 * 512) // 512MB cache - defer cache.Unref() + pebbleCache := pebble.NewCache(int64(512 * unit.MB)) + defer pebbleCache.Unref() popts := &pebble.Options{ - Cache: cache, - Comparer: cmp, + Cache: pebbleCache, + Comparer: pebble.DefaultComparer, // FormatMajorVersion is pinned to a specific version to prevent accidental // breaking changes when updating the pebble dependency. Using FormatNewest // would cause the on-disk format to silently upgrade when pebble is updated, @@ -87,39 +79,88 @@ func Open( // at the bottom level since most data lives there and false positive rate is low popts.Levels[6].FilterPolicy = nil - db, err := pebble.Open(path, popts) + db, err := pebble.Open(config.DataDir, popts) if err != nil { return nil, err } ctx, cancel := context.WithCancel(ctx) - if enableMetrics { - metrics.NewPebbleMetrics(ctx, db, filepath.Base(path), metricsScrapeInterval) + if config.EnableMetrics { + NewPebbleMetrics(ctx, db, filepath.Base(config.DataDir), config.MetricsScrapeInterval) + } + + return &pebbleDB{ + db: db, + metricsCancel: cancel, + }, nil +} + +// OpenWithCache opens a Pebble-backed DB and wraps it with a read-through cache. +// When cacheConfig.MaxSize is 0 a no-op (passthrough) cache is used. 
+func OpenWithCache( + ctx context.Context, + config *PebbleDBConfig, + cacheConfig *dbcache.CacheConfig, + readPool threading.Pool, + miscPool threading.Pool, +) (types.KeyValueDB, error) { + db, err := Open(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to open database: %w", err) + } + + cache, err := dbcache.BuildCache(ctx, cacheConfig, readPool, miscPool) + if err != nil { + _ = db.Close() + return nil, fmt.Errorf("failed to create cache: %w", err) } - return &pebbleDB{db: db, metricsCancel: cancel}, nil + return dbcache.NewCachedKeyValueDB(db, cache), nil } func (p *pebbleDB) Get(key []byte) ([]byte, error) { - // Pebble returns a zero-copy view plus a closer; we copy and close internally. val, closer, err := p.db.Get(key) if err != nil { if errors.Is(err, pebble.ErrNotFound) { return nil, errorutils.ErrNotFound } - return nil, err + return nil, fmt.Errorf("failed to get value from database: %w", err) } cloned := bytes.Clone(val) _ = closer.Close() return cloned, nil } +func (p *pebbleDB) BatchGet(keys map[string]types.BatchGetResult) error { + for k := range keys { + val, err := p.Get([]byte(k)) + if err != nil { + if errorutils.IsNotFound(err) { + keys[k] = types.BatchGetResult{} + } else { + keys[k] = types.BatchGetResult{Error: err} + } + } else { + keys[k] = types.BatchGetResult{Value: val} + } + } + return nil +} + func (p *pebbleDB) Set(key, value []byte, opts types.WriteOptions) error { - return p.db.Set(key, value, toPebbleWriteOpts(opts)) + err := p.db.Set(key, value, toPebbleWriteOpts(opts)) + if err != nil { + return fmt.Errorf("failed to set value in database: %w", err) + } + return nil } func (p *pebbleDB) Delete(key []byte, opts types.WriteOptions) error { - return p.db.Delete(key, toPebbleWriteOpts(opts)) + err := p.db.Delete(key, toPebbleWriteOpts(opts)) + if err != nil { + return fmt.Errorf("failed to delete value in database: %w", err) + } + return nil } func (p *pebbleDB) NewIter(opts *types.IterOptions) 
(types.KeyValueDBIterator, error) { @@ -138,7 +179,12 @@ func (p *pebbleDB) NewIter(opts *types.IterOptions) (types.KeyValueDBIterator, e } func (p *pebbleDB) Flush() error { - return p.db.Flush() + err := p.db.Flush() + if err != nil { + return fmt.Errorf("failed to flush database: %w", err) + } + + return nil } func (p *pebbleDB) Checkpoint(destDir string) error { diff --git a/sei-db/db_engine/pebbledb/db_test.go b/sei-db/db_engine/pebbledb/db_test.go index 9d8f7cd400..17c13899b5 100644 --- a/sei-db/db_engine/pebbledb/db_test.go +++ b/sei-db/db_engine/pebbledb/db_test.go @@ -1,425 +1,236 @@ package pebbledb import ( - "bytes" "testing" "github.com/stretchr/testify/require" - "github.com/cockroachdb/pebble/v2" errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" ) -func TestDBGetSetDelete(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) +// forEachCacheMode runs fn once with a warm cache and once with caching disabled, +// so cache-sensitive tests exercise both the cache and the raw storage layer. 
+func forEachCacheMode(t *testing.T, fn func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig)) { + for _, mode := range []struct { + name string + cacheSize uint64 + }{ + {"cached", 16 * unit.MB}, + {"uncached", 0}, + } { + t.Run(mode.name, func(t *testing.T) { + cfg := DefaultTestConfig(t) + cacheCfg := DefaultTestCacheConfig() + cacheCfg.MaxSize = mode.cacheSize + fn(t, cfg, cacheCfg) + }) } +} + +func openDB(t *testing.T, cfg *PebbleDBConfig, cacheCfg *dbcache.CacheConfig) types.KeyValueDB { + t.Helper() + db, err := OpenWithCache(t.Context(), cfg, cacheCfg, + threading.NewAdHocPool(), threading.NewAdHocPool()) + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) + return db +} - key := []byte("k1") - val := []byte("v1") +// --------------------------------------------------------------------------- +// Cache-sensitive tests — run in both cached and uncached modes +// --------------------------------------------------------------------------- - _, err = db.Get(key) - if err != errorutils.ErrNotFound { - t.Fatalf("expected ErrNotFound, got %v", err) - } +func TestDBGetSetDelete(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) - if err := db.Set(key, val, types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } + key := []byte("k1") + val := []byte("v1") - got, err := db.Get(key) - if err != nil { - t.Fatalf("Get: %v", err) - } - if !bytes.Equal(got, val) { - t.Fatalf("value mismatch: got %q want %q", got, val) - } + _, err := db.Get(key) + require.ErrorIs(t, err, errorutils.ErrNotFound) - if err := db.Delete(key, types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Delete: %v", err) - } + require.NoError(t, db.Set(key, val, types.WriteOptions{Sync: false})) - _, err = db.Get(key) - if err != errorutils.ErrNotFound { - t.Fatalf("expected ErrNotFound after delete, got %v", err) - } + got, err := 
db.Get(key) + require.NoError(t, err) + require.Equal(t, val, got) + + require.NoError(t, db.Delete(key, types.WriteOptions{Sync: false})) + + _, err = db.Get(key) + require.ErrorIs(t, err, errorutils.ErrNotFound) + }) } func TestBatchAtomicWrite(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - b := db.NewBatch() - t.Cleanup(func() { require.NoError(t, b.Close()) }) + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) - if err := b.Set([]byte("a"), []byte("1")); err != nil { - t.Fatalf("batch set: %v", err) - } - if err := b.Set([]byte("b"), []byte("2")); err != nil { - t.Fatalf("batch set: %v", err) - } + b := db.NewBatch() + t.Cleanup(func() { require.NoError(t, b.Close()) }) - if err := b.Commit(types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("batch commit: %v", err) - } + require.NoError(t, b.Set([]byte("a"), []byte("1"))) + require.NoError(t, b.Set([]byte("b"), []byte("2"))) + require.NoError(t, b.Commit(types.WriteOptions{Sync: false})) - for _, tc := range []struct { - k string - v string - }{ - {"a", "1"}, - {"b", "2"}, - } { - got, err := db.Get([]byte(tc.k)) - if err != nil { - t.Fatalf("Get(%q): %v", tc.k, err) + for _, tc := range []struct{ k, v string }{{"a", "1"}, {"b", "2"}} { + got, err := db.Get([]byte(tc.k)) + require.NoError(t, err, "key=%q", tc.k) + require.Equal(t, tc.v, string(got), "key=%q", tc.k) } - if string(got) != tc.v { - t.Fatalf("Get(%q)=%q want %q", tc.k, got, tc.v) - } - } + }) } -func TestIteratorBounds(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - // Keys: a, b, c - for _, k := range []string{"a", "b", "c"} { - if err := 
db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", k, err) - } - } - - itr, err := db.NewIter(&types.IterOptions{LowerBound: []byte("b"), UpperBound: []byte("d")}) - if err != nil { - t.Fatalf("NewIter: %v", err) - } - t.Cleanup(func() { require.NoError(t, itr.Close()) }) - - var keys []string - for ok := itr.First(); ok && itr.Valid(); ok = itr.Next() { - keys = append(keys, string(itr.Key())) - } - if err := itr.Error(); err != nil { - t.Fatalf("iter error: %v", err) - } - // LowerBound inclusive => includes b; UpperBound exclusive => includes c (d not present anyway) - if len(keys) != 2 || keys[0] != "b" || keys[1] != "c" { - t.Fatalf("unexpected keys: %v", keys) - } +func TestErrNotFoundConsistency(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) + + _, err := db.Get([]byte("missing-key")) + require.Error(t, err) + require.ErrorIs(t, err, errorutils.ErrNotFound) + require.True(t, errorutils.IsNotFound(err)) + }) } -func TestIteratorPrev(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - // Keys: a, b, c - for _, k := range []string{"a", "b", "c"} { - if err := db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", k, err) - } - } +func TestGetReturnsCopy(t *testing.T) { + cfg := DefaultTestConfig(t) + cacheCfg := DefaultTestCacheConfig() + cacheCfg.MaxSize = 0 + db := openDB(t, &cfg, &cacheCfg) - itr, err := db.NewIter(nil) - if err != nil { - t.Fatalf("NewIter: %v", err) - } - t.Cleanup(func() { require.NoError(t, itr.Close()) }) + require.NoError(t, db.Set([]byte("k"), []byte("v"), types.WriteOptions{Sync: false})) - if !itr.Last() || !itr.Valid() { - t.Fatalf("expected Last() to position iterator") - } - if 
string(itr.Key()) != "c" { - t.Fatalf("expected key=c at Last(), got %q", itr.Key()) - } + got, err := db.Get([]byte("k")) + require.NoError(t, err) + got[0] = 'X' - if !itr.Prev() || !itr.Valid() { - t.Fatalf("expected Prev() to succeed") - } - if string(itr.Key()) != "b" { - t.Fatalf("expected key=b after Prev(), got %q", itr.Key()) - } + got2, err := db.Get([]byte("k")) + require.NoError(t, err) + require.Equal(t, "v", string(got2), "stored value should remain unchanged") } -func TestIteratorNextPrefixWithComparerSplit(t *testing.T) { - // Use a custom comparer with Split that treats everything up to (and including) '/' - // as the "prefix" for NextPrefix() / prefix-based skipping. - cmp := *pebble.DefaultComparer - cmp.Name = "sei-db/test-split-on-slash" - cmp.Split = func(k []byte) int { - for i, b := range k { - if b == '/' { - return i + 1 - } - } - return len(k) - } - // NextPrefix relies on Comparer.ImmediateSuccessor to compute a key that is - // guaranteed to be greater than all keys sharing the current prefix. - // pebble.DefaultComparer.ImmediateSuccessor appends 0x00, which is not - // sufficient for our "prefix ends at '/'" convention (e.g. "a/\x00" < "a/2"). - // We provide an ImmediateSuccessor that increments the last byte (from the end) - // to produce a prefix upper bound (e.g. "a/" -> "a0"). - cmp.ImmediateSuccessor = func(dst, a []byte) []byte { - for i := len(a) - 1; i >= 0; i-- { - if a[i] != 0xff { - dst = append(dst, a[:i+1]...) - dst[len(dst)-1]++ - return dst - } - } - return append(dst, a...) 
- } +func TestBatchLenResetDelete(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{Comparer: &cmp}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + require.NoError(t, db.Set([]byte("to-delete"), []byte("val"), types.WriteOptions{Sync: false})) - for _, k := range []string{"a/1", "a/2", "a/3", "b/1"} { - if err := db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", k, err) - } - } + b := db.NewBatch() + t.Cleanup(func() { require.NoError(t, b.Close()) }) - itr, err := db.NewIter(nil) - if err != nil { - t.Fatalf("NewIter: %v", err) - } - t.Cleanup(func() { require.NoError(t, itr.Close()) }) + initialLen := b.Len() - if !itr.SeekGE([]byte("a/")) || !itr.Valid() { - t.Fatalf("expected SeekGE(a/) to be valid") - } - if !bytes.HasPrefix(itr.Key(), []byte("a/")) { - t.Fatalf("expected key with prefix a/, got %q", itr.Key()) - } + require.NoError(t, b.Set([]byte("a"), []byte("1"))) + require.NoError(t, b.Delete([]byte("to-delete"))) + require.Greater(t, b.Len(), initialLen) - if !itr.NextPrefix() || !itr.Valid() { - t.Fatalf("expected NextPrefix() to move to next prefix") - } - if string(itr.Key()) != "b/1" { - t.Fatalf("expected key=b/1 after NextPrefix(), got %q", itr.Key()) - } -} + b.Reset() + require.Equal(t, initialLen, b.Len()) -func TestOpenOptionsComparerTypeCheck(t *testing.T) { - dir := t.TempDir() - _, err := Open(t.Context(), dir, types.OpenOptions{Comparer: "not-a-pebble-comparer"}, false) - if err == nil { - t.Fatalf("expected error for invalid comparer type") - } -} + require.NoError(t, b.Set([]byte("b"), []byte("2"))) + require.NoError(t, b.Commit(types.WriteOptions{Sync: false})) -func TestErrNotFoundConsistency(t *testing.T) { - dir := t.TempDir() - db, err := 
Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + got, err := db.Get([]byte("b")) + require.NoError(t, err) + require.Equal(t, "2", string(got)) + }) +} - // Test that Get on missing key returns ErrNotFound - _, err = db.Get([]byte("missing-key")) - if err == nil { - t.Fatalf("expected error for missing key") - } +func TestFlush(t *testing.T) { + forEachCacheMode(t, func(t *testing.T, cfg PebbleDBConfig, cacheCfg dbcache.CacheConfig) { + db := openDB(t, &cfg, &cacheCfg) - // Test that error is ErrNotFound - if err != errorutils.ErrNotFound { - t.Fatalf("expected ErrNotFound, got %v", err) - } + require.NoError(t, db.Set([]byte("flush-test"), []byte("val"), types.WriteOptions{Sync: false})) + require.NoError(t, db.Flush()) - // Test that IsNotFound helper works - if !errorutils.IsNotFound(err) { - t.Fatalf("IsNotFound should return true for ErrNotFound") - } + got, err := db.Get([]byte("flush-test")) + require.NoError(t, err) + require.Equal(t, "val", string(got)) + }) } -func TestGetReturnsCopy(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - key := []byte("k") - val := []byte("v") - if err := db.Set(key, val, types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } +// --------------------------------------------------------------------------- +// Cache-irrelevant tests — iterators and lifecycle, run once +// --------------------------------------------------------------------------- - got, err := db.Get(key) - if err != nil { - t.Fatalf("Get: %v", err) - } - // Modify returned slice; should not affect stored value if Get returns a copy. 
- got[0] = 'X' +func TestIteratorBounds(t *testing.T) { + cfg := DefaultTestConfig(t) + cacheCfg := DefaultTestCacheConfig() + db := openDB(t, &cfg, &cacheCfg) - got2, err := db.Get(key) - if err != nil { - t.Fatalf("Get: %v", err) - } - if string(got2) != "v" { - t.Fatalf("expected stored value to remain unchanged, got %q", got2) + for _, k := range []string{"a", "b", "c"} { + require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) } -} -func TestBatchLenResetDelete(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + itr, err := db.NewIter(&types.IterOptions{LowerBound: []byte("b"), UpperBound: []byte("d")}) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, itr.Close()) }) - // First, set a key so we can delete it - if err := db.Set([]byte("to-delete"), []byte("val"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) + var keys []string + for ok := itr.First(); ok && itr.Valid(); ok = itr.Next() { + keys = append(keys, string(itr.Key())) } + require.NoError(t, itr.Error()) + require.Equal(t, []string{"b", "c"}, keys) +} - b := db.NewBatch() - t.Cleanup(func() { require.NoError(t, b.Close()) }) - - // Record initial batch len (Pebble batch always has a header, so may not be 0) - initialLen := b.Len() - - // Add some operations - if err := b.Set([]byte("a"), []byte("1")); err != nil { - t.Fatalf("batch set: %v", err) - } - if err := b.Delete([]byte("to-delete")); err != nil { - t.Fatalf("batch delete: %v", err) - } +func TestIteratorPrev(t *testing.T) { + cfg := DefaultTestConfig(t) + cacheCfg := DefaultTestCacheConfig() + db := openDB(t, &cfg, &cacheCfg) - // Len should increase after operations (Pebble Len() returns bytes, not count) - if b.Len() <= initialLen { - t.Fatalf("expected Len() to increase after operations, got %d (initial %d)", 
b.Len(), initialLen) + for _, k := range []string{"a", "b", "c"} { + require.NoError(t, db.Set([]byte(k), []byte("x"), types.WriteOptions{Sync: false})) } - // Reset should clear the batch back to initial state - b.Reset() - if b.Len() != initialLen { - t.Fatalf("expected Len()=%d after Reset, got %d", initialLen, b.Len()) - } + itr, err := db.NewIter(nil) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, itr.Close()) }) - // Add and commit - if err := b.Set([]byte("b"), []byte("2")); err != nil { - t.Fatalf("batch set: %v", err) - } - if err := b.Commit(types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("batch commit: %v", err) - } + require.True(t, itr.Last()) + require.True(t, itr.Valid()) + require.Equal(t, "c", string(itr.Key())) - // Verify "b" was written - got, err := db.Get([]byte("b")) - if err != nil { - t.Fatalf("Get: %v", err) - } - if string(got) != "2" { - t.Fatalf("expected '2', got %q", got) - } + require.True(t, itr.Prev()) + require.True(t, itr.Valid()) + require.Equal(t, "b", string(itr.Key())) } func TestIteratorSeekLTAndValue(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) + cfg := DefaultTestConfig(t) + cacheCfg := DefaultTestCacheConfig() + db := openDB(t, &cfg, &cacheCfg) - // Insert keys: a, b, c with values for _, kv := range []struct{ k, v string }{ {"a", "val-a"}, {"b", "val-b"}, {"c", "val-c"}, } { - if err := db.Set([]byte(kv.k), []byte(kv.v), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set(%q): %v", kv.k, err) - } + require.NoError(t, db.Set([]byte(kv.k), []byte(kv.v), types.WriteOptions{Sync: false})) } itr, err := db.NewIter(nil) - if err != nil { - t.Fatalf("NewIter: %v", err) - } + require.NoError(t, err) t.Cleanup(func() { require.NoError(t, itr.Close()) }) - // SeekLT("c") should position at "b" - if !itr.SeekLT([]byte("c")) || 
!itr.Valid() { - t.Fatalf("expected SeekLT(c) to be valid") - } - if string(itr.Key()) != "b" { - t.Fatalf("expected key=b after SeekLT(c), got %q", itr.Key()) - } - if string(itr.Value()) != "val-b" { - t.Fatalf("expected value=val-b, got %q", itr.Value()) - } -} - -func TestFlush(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - t.Cleanup(func() { require.NoError(t, db.Close()) }) - - // Set some data - if err := db.Set([]byte("flush-test"), []byte("val"), types.WriteOptions{Sync: false}); err != nil { - t.Fatalf("Set: %v", err) - } - - // Flush should succeed - if err := db.Flush(); err != nil { - t.Fatalf("Flush: %v", err) - } - - // Data should still be readable - got, err := db.Get([]byte("flush-test")) - if err != nil { - t.Fatalf("Get after flush: %v", err) - } - if string(got) != "val" { - t.Fatalf("expected 'val', got %q", got) - } + require.True(t, itr.SeekLT([]byte("c"))) + require.True(t, itr.Valid()) + require.Equal(t, "b", string(itr.Key())) + require.Equal(t, "val-b", string(itr.Value())) } func TestCloseIsIdempotent(t *testing.T) { - dir := t.TempDir() - db, err := Open(t.Context(), dir, types.OpenOptions{}, false) - if err != nil { - t.Fatalf("Open: %v", err) - } - - // First close should succeed - if err := db.Close(); err != nil { - t.Fatalf("first Close: %v", err) - } - - // Second close should be idempotent (no panic, returns nil) - if err := db.Close(); err != nil { - t.Fatalf("second Close should return nil, got: %v", err) - } + cfg := DefaultTestConfig(t) + cacheCfg := DefaultTestCacheConfig() + db, err := OpenWithCache(t.Context(), &cfg, &cacheCfg, + threading.NewAdHocPool(), threading.NewAdHocPool()) + require.NoError(t, err) + + require.NoError(t, db.Close()) + require.NoError(t, db.Close(), "second Close should be idempotent") } diff --git a/sei-db/common/metrics/pebble_metrics.go b/sei-db/db_engine/pebbledb/pebble_metrics.go 
similarity index 98% rename from sei-db/common/metrics/pebble_metrics.go rename to sei-db/db_engine/pebbledb/pebble_metrics.go index d41e55d807..54f66c589e 100644 --- a/sei-db/common/metrics/pebble_metrics.go +++ b/sei-db/db_engine/pebbledb/pebble_metrics.go @@ -1,7 +1,4 @@ -// Package metrics provides OpenTelemetry instruments and scrapers for Pebble DB metrics, -// allowing any Pebble instance to export compaction, flush, cache, and storage metrics -// to OTel-compatible backends (e.g., Prometheus). -package metrics +package pebbledb import ( "context" @@ -13,6 +10,8 @@ import ( "go.opentelemetry.io/otel/metric" "github.com/cockroachdb/pebble/v2" + + smetrics "github.com/sei-protocol/sei-chain/sei-db/common/metrics" ) const pebbleMeterName = "seidb_pebble" @@ -266,31 +265,37 @@ func NewPebbleMetrics( "pebble_get_latency", metric.WithDescription("Time taken to get a key from PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) applyChangesetLatency, _ := meter.Float64Histogram( "pebble_apply_changeset_latency", metric.WithDescription("Time taken to apply changeset to PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) applyChangesetAsyncLatency, _ := meter.Float64Histogram( "pebble_apply_changeset_async_latency", metric.WithDescription("Time taken to queue changeset for async write"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) pruneLatency, _ := meter.Float64Histogram( "pebble_prune_latency", metric.WithDescription("Time taken to prune old versions from PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) importLatency, _ := meter.Float64Histogram( "pebble_import_latency", metric.WithDescription("Time taken to import snapshot data to PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) batchWriteLatency, _ := 
meter.Float64Histogram( "pebble_batch_write_latency", metric.WithDescription("Time taken to write a batch to PebbleDB"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) compactionCount, _ := meter.Int64Counter( @@ -302,6 +307,7 @@ func NewPebbleMetrics( "pebble_compaction_duration", metric.WithDescription("Duration of compaction operations"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) compactionBytesRead, _ := meter.Int64Counter( "pebble_compaction_bytes_read", @@ -424,6 +430,7 @@ func NewPebbleMetrics( "pebble_flush_duration", metric.WithDescription("Duration of memtable flush operations"), metric.WithUnit("s"), + metric.WithExplicitBucketBoundaries(smetrics.LatencyBuckets...), ) flushBytesWritten, _ := meter.Int64Counter( "pebble_flush_bytes_written", @@ -979,6 +986,7 @@ func NewPebbleMetrics( "pebble_batch_size", metric.WithDescription("Size of batches written to PebbleDB"), metric.WithUnit("By"), + metric.WithExplicitBucketBoundaries(smetrics.ByteSizeBuckets...), ) pendingChangesQueueDepth, _ := meter.Int64Gauge( "pebble_pending_changes_queue_depth", @@ -989,6 +997,7 @@ func NewPebbleMetrics( "pebble_iterator_iterations", metric.WithDescription("Number of iterations per iterator"), metric.WithUnit("{count}"), + metric.WithExplicitBucketBoundaries(smetrics.CountBuckets...), ) pm := &PebbleMetrics{ diff --git a/sei-db/db_engine/pebbledb/pebbledb_config.go b/sei-db/db_engine/pebbledb/pebbledb_config.go new file mode 100644 index 0000000000..383710e90a --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebbledb_config.go @@ -0,0 +1,35 @@ +package pebbledb + +import ( + "fmt" + "time" +) + +// Configuration for the PebbleDB database. +type PebbleDBConfig struct { + // The directory to store the database files. This has no default value and must be provided. + DataDir string + // Whether to enable pebble-internal metrics. 
+ EnableMetrics bool + // How often to scrape pebble-internal metrics. + MetricsScrapeInterval time.Duration +} + +// Default configuration for the PebbleDB database. +func DefaultConfig() PebbleDBConfig { + return PebbleDBConfig{ + EnableMetrics: true, + MetricsScrapeInterval: 10 * time.Second, + } +} + +// Validates the configuration (basic sanity checks). +func (c *PebbleDBConfig) Validate() error { + if c.DataDir == "" { + return fmt.Errorf("data dir is required") + } + if c.EnableMetrics && c.MetricsScrapeInterval <= 0 { + return fmt.Errorf("metrics scrape interval must be positive when metrics are enabled") + } + return nil +} diff --git a/sei-db/db_engine/pebbledb/pebbledb_test_config.go b/sei-db/db_engine/pebbledb/pebbledb_test_config.go new file mode 100644 index 0000000000..897e73c0c2 --- /dev/null +++ b/sei-db/db_engine/pebbledb/pebbledb_test_config.go @@ -0,0 +1,25 @@ +package pebbledb + +import ( + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" +) + +// DefaultTestConfig returns a PebbleDBConfig suitable for testing. +// Uses a fresh t.TempDir() as the data directory and disables metrics. +func DefaultTestConfig(t *testing.T) PebbleDBConfig { + cfg := DefaultConfig() + cfg.DataDir = t.TempDir() + cfg.EnableMetrics = false + return cfg +} + +// DefaultTestCacheConfig returns a CacheConfig suitable for testing. +func DefaultTestCacheConfig() dbcache.CacheConfig { + return dbcache.CacheConfig{ + ShardCount: 8, + MaxSize: 16 * unit.MB, + } +} diff --git a/sei-db/db_engine/types/types.go b/sei-db/db_engine/types/types.go index 7d4c33a5b8..161017381d 100644 --- a/sei-db/db_engine/types/types.go +++ b/sei-db/db_engine/types/types.go @@ -54,6 +54,12 @@ type KeyValueDB interface { // Get returns the value for the given key, returning an error if the key is not found. Get(key []byte) (value []byte, err error) + // BatchGet performs a batch read operation.
Given a map of keys to read, performs the reads and updates the + // map with the results. + // + // It is not thread safe to read or mutate the map while this method is running. + BatchGet(keys map[string]BatchGetResult) error + // Set sets the value for the given key. Set(key, value []byte, opts WriteOptions) error diff --git a/sei-db/state_db/bench/cryptosim/block_builder.go b/sei-db/state_db/bench/cryptosim/block_builder.go index 66d45e66d5..5389b33135 100644 --- a/sei-db/state_db/bench/cryptosim/block_builder.go +++ b/sei-db/state_db/bench/cryptosim/block_builder.go @@ -48,6 +48,7 @@ func (b *blockBuilder) Start() { // Builds blocks and sends them to the blocks channel. func (b *blockBuilder) mainLoop() { + defer b.dataGenerator.Close() for { block := b.buildBlock() select { diff --git a/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go b/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go index 0282e0e251..9a48afc410 100644 --- a/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go +++ b/sei-db/state_db/bench/cryptosim/cmd/configure-logger/main.go @@ -38,6 +38,21 @@ func run() error { return fmt.Errorf("load config: %w", err) } + if cfg.LogDir == "" { + return fmt.Errorf("LogDir is empty, refusing to proceed") + } + + if cfg.DeleteDataDirOnStartup { + resolved, err := filepath.Abs(cfg.LogDir) + if err != nil { + return fmt.Errorf("failed to resolve log directory: %w", err) + } + fmt.Fprintf(os.Stderr, "Deleting log directory: %s\n", resolved) + if err := os.RemoveAll(resolved); err != nil { + return fmt.Errorf("failed to delete log directory %s: %w", resolved, err) + } + } + logDir, err := cryptosim.ResolveAndCreateDir(cfg.LogDir) if err != nil { return fmt.Errorf("resolve log dir: %w", err) diff --git a/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go b/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go index 6c475dc3d3..7847630a07 100644 --- a/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go +++ 
b/sei-db/state_db/bench/cryptosim/cmd/cryptosim/main.go @@ -7,6 +7,7 @@ import ( "net/http" "os" "os/signal" + "path/filepath" "time" "github.com/prometheus/client_golang/prometheus" @@ -89,6 +90,21 @@ func run() error { } fmt.Printf("%s\n", configString) + if config.DeleteDataDirOnStartup { + if config.DataDir == "" { + return fmt.Errorf("DataDir is empty, refusing to delete") + } + resolved, err := filepath.Abs(config.DataDir) + if err != nil { + return fmt.Errorf("failed to resolve data directory: %w", err) + } + fmt.Printf("Deleting data directory: %s\n", resolved) + err = os.RemoveAll(resolved) + if err != nil { + return fmt.Errorf("failed to delete data directory: %w", err) + } + } + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) defer stop() @@ -134,5 +150,22 @@ func run() error { cs.BlockUntilHalted() + if config.DeleteDataDirOnShutdown { + for _, dir := range []string{config.DataDir, config.LogDir} { + if dir == "" { + return fmt.Errorf("directory path is empty, refusing to delete") + } + resolved, err := filepath.Abs(dir) + if err != nil { + return fmt.Errorf("failed to resolve directory: %w", err) + } + fmt.Printf("Deleting directory: %s\n", resolved) + err = os.RemoveAll(resolved) + if err != nil { + return fmt.Errorf("failed to delete directory %s: %w", resolved, err) + } + } + } + return nil } diff --git a/sei-db/state_db/bench/cryptosim/config/basic-config.json b/sei-db/state_db/bench/cryptosim/config/basic-config.json index 5c2b18b7c1..283016de21 100644 --- a/sei-db/state_db/bench/cryptosim/config/basic-config.json +++ b/sei-db/state_db/bench/cryptosim/config/basic-config.json @@ -11,6 +11,8 @@ "Erc20ContractSize": 2048, "Erc20InteractionsPerAccount": 10, "Erc20StorageSlotSize": 32, + "DeleteDataDirOnStartup": false, + "DeleteDataDirOnShutdown": false, "ExecutorQueueSize": 1024, "HotAccountProbability": 0.1, "HotErc20ContractProbability": 0.5, diff --git a/sei-db/state_db/bench/cryptosim/config/debug.json 
b/sei-db/state_db/bench/cryptosim/config/debug.json new file mode 100644 index 0000000000..a8e5666931 --- /dev/null +++ b/sei-db/state_db/bench/cryptosim/config/debug.json @@ -0,0 +1,8 @@ +{ + "Comment": "For locally testing/debugging the benchmark or related code.", + "DataDir": "data", + "LogDir": "logs", + "DeleteDataDirOnStartup": true, + "DeleteDataDirOnShutdown": true +} + diff --git a/sei-db/state_db/bench/cryptosim/config/large.json b/sei-db/state_db/bench/cryptosim/config/large.json deleted file mode 100644 index b17fd20e33..0000000000 --- a/sei-db/state_db/bench/cryptosim/config/large.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "Comment": "A large simulation. This is the largest sane simulation for local testing.", - "DataDir": "data", - "LogDir": "logs", - "MinimumNumberOfColdAccounts": 1000000, - "MinimumNumberOfDormantAccounts": 100000000 -} - diff --git a/sei-db/state_db/bench/cryptosim/config/medium.json b/sei-db/state_db/bench/cryptosim/config/medium.json deleted file mode 100644 index d68cc6f9a1..0000000000 --- a/sei-db/state_db/bench/cryptosim/config/medium.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "Comment": "A medium-sized simulation. 
Takes a few minutes to set up, but is not extremely onerous to set up.", - "DataDir": "data", - "LogDir": "logs", - "MinimumNumberOfColdAccounts": 1000000, - "MinimumNumberOfDormantAccounts": 10000000 -} - diff --git a/sei-db/state_db/bench/cryptosim/config/reciept-store.json b/sei-db/state_db/bench/cryptosim/config/reciept-store.json index dbb621e8ae..ac2801ec8e 100644 --- a/sei-db/state_db/bench/cryptosim/config/reciept-store.json +++ b/sei-db/state_db/bench/cryptosim/config/reciept-store.json @@ -1,6 +1,7 @@ { "Comment": "For testing with the state store and reciept store both enabled.", "DataDir": "data", + "LogDir": "logs", "MinimumNumberOfColdAccounts": 1000000, "MinimumNumberOfDormantAccounts": 1000000, "GenerateReceipts": true diff --git a/sei-db/state_db/bench/cryptosim/config/standard-perf.json b/sei-db/state_db/bench/cryptosim/config/standard-perf.json new file mode 100644 index 0000000000..ca267f6ef0 --- /dev/null +++ b/sei-db/state_db/bench/cryptosim/config/standard-perf.json @@ -0,0 +1,13 @@ +{ + "Comment": "The standardized parameters for performance and longevity testing.", + "DataDir": "data", + "LogDir": "logs", + "MinimumNumberOfColdAccounts": 1000000, + "MinimumNumberOfDormantAccounts": 100000000, + "FlatKVConfig": { + "AccountCacheConfig": { "MaxSize": 1073741824 }, + "CodeCacheConfig": { "MaxSize": 1073741824 }, + "StorageCacheConfig": { "MaxSize": 4294967296 } + } +} + diff --git a/sei-db/state_db/bench/cryptosim/cryptosim.go b/sei-db/state_db/bench/cryptosim/cryptosim.go index 2b1b593250..c686139ed8 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim.go @@ -111,10 +111,13 @@ func NewCryptoSim( config.MinimumNumberOfDormantAccounts = 2 * config.TransactionsPerBlock } + // The workload context is cancelled on Ctrl-C (or programmatically) to + // stop the benchmark loop and executors. 
ctx, cancel := context.WithCancel(ctx) var err error config.DataDir, err = ResolveAndCreateDir(config.DataDir) + if err != nil { cancel() return nil, fmt.Errorf("failed to resolve and create data directory: %w", err) @@ -128,7 +131,12 @@ func NewCryptoSim( fmt.Printf("Running cryptosim benchmark from data directory: %s\n", config.DataDir) fmt.Printf("Logs are being routed to: %s\n", config.LogDir) - db, err := wrappers.NewDBImpl(ctx, config.Backend, config.DataDir) + var dbConfig any + if config.Backend == wrappers.FlatKV { + dbConfig = config.FlatKVConfig + } + + db, err := wrappers.NewDBImpl(ctx, config.Backend, config.DataDir, dbConfig) if err != nil { cancel() return nil, fmt.Errorf("failed to create database: %w", err) @@ -500,8 +508,7 @@ func (c *CryptoSim) teardown() { } } - c.dataGenerator.Close() - + c.cancel() c.closeChan <- struct{}{} } diff --git a/sei-db/state_db/bench/cryptosim/cryptosim_config.go b/sei-db/state_db/bench/cryptosim/cryptosim_config.go index 53b5f28e65..9eb7ebb037 100644 --- a/sei-db/state_db/bench/cryptosim/cryptosim_config.go +++ b/sei-db/state_db/bench/cryptosim/cryptosim_config.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/sei-protocol/sei-chain/sei-db/state_db/bench/wrappers" + "github.com/sei-protocol/sei-chain/sei-db/state_db/sc/flatkv" ) const ( @@ -137,6 +138,15 @@ type CryptoSimConfig struct { // If false, Enter has no effect. EnableSuspension bool + // If true, the data directory and log directory will be deleted on startup if they exist. + DeleteDataDirOnStartup bool + + // If true, the data directory and log directory will be deleted on a clean shutdown. + DeleteDataDirOnShutdown bool + + // Configures the FlatKV database. Ignored if Backend is not "FlatKV". + FlatKVConfig *flatkv.Config + // The capacity of the channel that holds blocks awaiting execution. 
BlockChannelCapacity int @@ -179,7 +189,7 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { // Note: if you add new fields or modify default values, be sure to keep config/basic-config.json in sync. // That file should contain every available config set to its default value, as a reference. - return &CryptoSimConfig{ + cfg := &CryptoSimConfig{ NumberOfHotAccounts: 100, MinimumNumberOfColdAccounts: 1_000_000, MinimumNumberOfDormantAccounts: 1_000_000, @@ -210,6 +220,9 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { TransactionMetricsSampleRate: 0.001, BackgroundMetricsScrapeInterval: 60, EnableSuspension: true, + DeleteDataDirOnStartup: false, + DeleteDataDirOnShutdown: false, + FlatKVConfig: flatkv.DefaultConfig(), BlockChannelCapacity: 8, GenerateReceipts: false, RecieptChannelCapacity: 32, @@ -219,6 +232,8 @@ func DefaultCryptoSimConfig() *CryptoSimConfig { ReceiptPruneIntervalSeconds: 600, LogLevel: "info", } + + return cfg } // StringifiedConfig returns the config as human-readable, multi-line JSON. 
diff --git a/sei-db/state_db/bench/helper.go b/sei-db/state_db/bench/helper.go index 2a3f7b6d59..2df93e25a7 100644 --- a/sei-db/state_db/bench/helper.go +++ b/sei-db/state_db/bench/helper.go @@ -396,7 +396,7 @@ func runBenchmark(b *testing.B, scenario TestScenario, withProgress bool) { func() { dbDir := b.TempDir() b.StopTimer() - cs, err := wrappers.NewDBImpl(b.Context(), scenario.Backend, dbDir) + cs, err := wrappers.NewDBImpl(b.Context(), scenario.Backend, dbDir, nil) require.NoError(b, err) // Load snapshot if available diff --git a/sei-db/state_db/bench/wrappers/db_implementations.go b/sei-db/state_db/bench/wrappers/db_implementations.go index be88e7edaa..bba15e8799 100644 --- a/sei-db/state_db/bench/wrappers/db_implementations.go +++ b/sei-db/state_db/bench/wrappers/db_implementations.go @@ -46,12 +46,18 @@ func newMemIAVLCommitStore(dbDir string) (DBWrapper, error) { return NewMemIAVLWrapper(cs), nil } -func newFlatKVCommitStore(ctx context.Context, dbDir string) (DBWrapper, error) { - cfg := flatkv.DefaultConfig() - cfg.Fsync = false +func newFlatKVCommitStore(ctx context.Context, dbDir string, config *flatkv.Config) (DBWrapper, error) { + if config == nil { + config = flatkv.DefaultConfig() + } + config.DataDir = dbDir + fmt.Printf("Opening flatKV from directory %s\n", dbDir) - cs := flatkv.NewCommitStore(ctx, dbDir, cfg) - _, err := cs.LoadVersion(0, false) + cs, err := flatkv.NewCommitStore(ctx, config) + if err != nil { + return nil, fmt.Errorf("failed to create FlatKV commit store: %w", err) + } + _, err = cs.LoadVersion(0, false) if err != nil { if closeErr := cs.Close(); closeErr != nil { fmt.Printf("failed to close commit store during error recovery: %v\n", closeErr) @@ -119,12 +125,12 @@ func newCombinedCompositeDualSSComposite(ctx context.Context, dbDir string) (DBW } // NewDBImpl instantiates a new empty DBWrapper based on the given DBType. 
-func NewDBImpl(ctx context.Context, dbType DBType, dataDir string) (DBWrapper, error) { +func NewDBImpl(ctx context.Context, dbType DBType, dataDir string, dbConfig any) (DBWrapper, error) { switch dbType { case MemIAVL: return newMemIAVLCommitStore(dataDir) case FlatKV: - return newFlatKVCommitStore(ctx, dataDir) + return newFlatKVCommitStore(ctx, dataDir, dbConfig.(*flatkv.Config)) case CompositeDual: return newCompositeCommitStore(ctx, dataDir, config.DualWrite) case CompositeSplit: diff --git a/sei-db/state_db/sc/composite/store.go b/sei-db/state_db/sc/composite/store.go index a9fa115f8a..1d49e4121f 100644 --- a/sei-db/state_db/sc/composite/store.go +++ b/sei-db/state_db/sc/composite/store.go @@ -68,8 +68,12 @@ func NewCompositeCommitStore( // Initialize FlatKV store struct if write mode requires it // Note: DB is NOT opened here, will be opened in LoadVersion if cfg.WriteMode == config.DualWrite || cfg.WriteMode == config.SplitWrite { - flatkvPath := filepath.Join(homeDir, "data", "flatkv") - store.evmCommitter = flatkv.NewCommitStore(ctx, flatkvPath, cfg.FlatKVConfig) + cfg.FlatKVConfig.DataDir = filepath.Join(homeDir, "data", "flatkv") + var err error + store.evmCommitter, err = flatkv.NewCommitStore(ctx, &cfg.FlatKVConfig) + if err != nil { + panic(fmt.Errorf("failed to create FlatKV commit store: %w", err)) + } } return store diff --git a/sei-db/state_db/sc/flatkv/config.go b/sei-db/state_db/sc/flatkv/config.go index ebe47e4c68..9959dcbde4 100644 --- a/sei-db/state_db/sc/flatkv/config.go +++ b/sei-db/state_db/sc/flatkv/config.go @@ -1,5 +1,14 @@ package flatkv +import ( + "fmt" + "path/filepath" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" +) + const ( DefaultSnapshotInterval uint32 = 10000 DefaultSnapshotKeepRecent uint32 = 2 @@ -7,6 +16,10 @@ const ( // Config defines configuration for the FlatKV (EVM) commit 
store. type Config struct { + // DataDir is the root directory for the FlatKV data files. + // Must be set before calling Validate(). + DataDir string + // Fsync controls whether PebbleDB writes (data DBs + metadataDB) use fsync. // WAL always uses NoSync (matching memiavl); crash recovery relies on // WAL catchup, which is idempotent. @@ -33,15 +46,148 @@ type Config struct { // EnablePebbleMetrics defines if the Pebble metrics should be enabled. // Default: true EnablePebbleMetrics bool `mapstructure:"enable-pebble-metrics"` + + // AccountDBConfig defines the PebbleDB configuration for the account database. + AccountDBConfig pebbledb.PebbleDBConfig + // AccountCacheConfig defines the cache configuration for the account database. + AccountCacheConfig dbcache.CacheConfig + + // CodeDBConfig defines the PebbleDB configuration for the code database. + CodeDBConfig pebbledb.PebbleDBConfig + // CodeCacheConfig defines the cache configuration for the code database. + CodeCacheConfig dbcache.CacheConfig + + // StorageDBConfig defines the PebbleDB configuration for the storage database. + StorageDBConfig pebbledb.PebbleDBConfig + // StorageCacheConfig defines the cache configuration for the storage database. + StorageCacheConfig dbcache.CacheConfig + + // LegacyDBConfig defines the PebbleDB configuration for the legacy database. + LegacyDBConfig pebbledb.PebbleDBConfig + // LegacyCacheConfig defines the cache configuration for the legacy database. + LegacyCacheConfig dbcache.CacheConfig + + // MetadataDBConfig defines the PebbleDB configuration for the metadata database. + MetadataDBConfig pebbledb.PebbleDBConfig + // MetadataCacheConfig defines the cache configuration for the metadata database. + MetadataCacheConfig dbcache.CacheConfig + + // Controls the number of goroutines in the DB read pool. The number of threads in this pool is equal to + // ReaderThreadsPerCore * runtime.NumCPU() + ReaderConstantThreadCount. 
+ ReaderThreadsPerCore float64 + + // Controls the number of goroutines in the DB read pool. The number of threads in this pool is equal to + // ReaderThreadsPerCore * runtime.NumCPU() + ReaderConstantThreadCount. + ReaderConstantThreadCount int + + // Controls the size of the queue for work sent to the read pool. + ReaderPoolQueueSize int + + // Controls the number of goroutines pre-allocated in the thread pool for miscellaneous operations. + // The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. + MiscPoolThreadsPerCore float64 + + // Controls the number of goroutines pre-allocated in the thread pool for miscellaneous operations. + // The number of threads in this pool is equal to MiscThreadsPerCore * runtime.NumCPU() + MiscConstantThreadCount. + MiscConstantThreadCount int } // DefaultConfig returns Config with safe default values. -func DefaultConfig() Config { - return Config{ - Fsync: false, - AsyncWriteBuffer: 0, - SnapshotInterval: DefaultSnapshotInterval, - SnapshotKeepRecent: DefaultSnapshotKeepRecent, - EnablePebbleMetrics: true, +func DefaultConfig() *Config { + cfg := &Config{ + Fsync: false, + AsyncWriteBuffer: 0, + SnapshotInterval: DefaultSnapshotInterval, + SnapshotKeepRecent: DefaultSnapshotKeepRecent, + EnablePebbleMetrics: true, + AccountDBConfig: pebbledb.DefaultConfig(), + AccountCacheConfig: dbcache.DefaultCacheConfig(), + CodeDBConfig: pebbledb.DefaultConfig(), + CodeCacheConfig: dbcache.DefaultCacheConfig(), + StorageDBConfig: pebbledb.DefaultConfig(), + StorageCacheConfig: dbcache.DefaultCacheConfig(), + LegacyDBConfig: pebbledb.DefaultConfig(), + LegacyCacheConfig: dbcache.DefaultCacheConfig(), + MetadataDBConfig: pebbledb.DefaultConfig(), + MetadataCacheConfig: dbcache.DefaultCacheConfig(), + ReaderThreadsPerCore: 2.0, + ReaderConstantThreadCount: 0, + ReaderPoolQueueSize: 1024, + MiscPoolThreadsPerCore: 4.0, + MiscConstantThreadCount: 0, + } + + cfg.AccountCacheConfig.MaxSize = 
unit.GB + cfg.StorageCacheConfig.MaxSize = unit.GB * 4 + + return cfg +} + +// Copy returns a deep copy of the Config. +func (c *Config) Copy() *Config { + // The nested PebbleDB configs are value types, so a shallow struct copy is sufficient. + cp := *c + return &cp +} + +// InitializeDataDirectories sets the DataDir for each nested PebbleDB config +// that does not already have one, using DataDir as the base path. The DBs live +// under the working directory: <DataDir>/working/<db-name>. +func (c *Config) InitializeDataDirectories() { + workDir := filepath.Join(c.DataDir, workingDirName) + if c.AccountDBConfig.DataDir == "" { + c.AccountDBConfig.DataDir = filepath.Join(workDir, accountDBDir) + } + if c.CodeDBConfig.DataDir == "" { + c.CodeDBConfig.DataDir = filepath.Join(workDir, codeDBDir) + } + if c.StorageDBConfig.DataDir == "" { + c.StorageDBConfig.DataDir = filepath.Join(workDir, storageDBDir) + } + if c.LegacyDBConfig.DataDir == "" { + c.LegacyDBConfig.DataDir = filepath.Join(workDir, legacyDBDir) + } + if c.MetadataDBConfig.DataDir == "" { + c.MetadataDBConfig.DataDir = filepath.Join(workDir, metadataDir) + } +} + +// Validate checks that the configuration is sane and returns an error if it is not.
+func (c *Config) Validate() error { + if c.DataDir == "" { + return fmt.Errorf("data dir is required") + } + if err := c.AccountDBConfig.Validate(); err != nil { + return fmt.Errorf("account db config is invalid: %w", err) + } + if err := c.CodeDBConfig.Validate(); err != nil { + return fmt.Errorf("code db config is invalid: %w", err) } + if err := c.StorageDBConfig.Validate(); err != nil { + return fmt.Errorf("storage db config is invalid: %w", err) + } + if err := c.LegacyDBConfig.Validate(); err != nil { + return fmt.Errorf("legacy db config is invalid: %w", err) + } + if err := c.MetadataDBConfig.Validate(); err != nil { + return fmt.Errorf("metadata db config is invalid: %w", err) + } + + if c.ReaderThreadsPerCore < 0 { + return fmt.Errorf("reader threads per core must be greater than 0") + } + if c.ReaderConstantThreadCount < 0 { + return fmt.Errorf("reader constant thread count must be greater than 0") + } + if c.ReaderPoolQueueSize < 0 { + return fmt.Errorf("reader pool queue size must be greater than 0") + } + if c.MiscPoolThreadsPerCore < 0 { + return fmt.Errorf("misc threads per core must be greater than 0") + } + if c.MiscConstantThreadCount < 0 { + return fmt.Errorf("misc constant thread count must be greater than 0") + } + + return nil } diff --git a/sei-db/state_db/sc/flatkv/exporter_test.go b/sei-db/state_db/sc/flatkv/exporter_test.go index 2746d4633c..55475959cc 100644 --- a/sei-db/state_db/sc/flatkv/exporter_test.go +++ b/sei-db/state_db/sc/flatkv/exporter_test.go @@ -295,7 +295,11 @@ func TestImportSurvivesReopen(t *testing.T) { dir := t.TempDir() dbPath := filepath.Join(dir, flatkvRootDir) - s1 := NewCommitStore(t.Context(), dbPath, DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbPath + + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s1.LoadVersion(0, false) require.NoError(t, err) @@ -309,7 +313,11 @@ func TestImportSurvivesReopen(t *testing.T) { require.NoError(t, s1.Close()) // Reopen from 
the same directory — data must survive. - s2 := NewCommitStore(t.Context(), dbPath, DefaultConfig()) + cfg2 := DefaultTestConfig(t) + cfg2.DataDir = dbPath + + s2, err := NewCommitStore(t.Context(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(1, false) require.NoError(t, err) defer s2.Close() @@ -336,8 +344,12 @@ func TestImportPurgesStaleData(t *testing.T) { dir := t.TempDir() dbPath := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbPath, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbPath + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addrA := Address{0xAA} @@ -413,7 +425,8 @@ func TestImportPurgesStaleData(t *testing.T) { // --- Phase 3: import snapshot into the existing store --- require.NoError(t, s.Close()) - s = NewCommitStore(t.Context(), dbPath, DefaultConfig()) + s, err = NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) @@ -451,7 +464,8 @@ func TestImportPurgesStaleData(t *testing.T) { // Verify the store survives a reopen. 
require.NoError(t, s.Close()) - s = NewCommitStore(t.Context(), dbPath, DefaultConfig()) + s, err = NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s.LoadVersion(1, false) require.NoError(t, err) defer s.Close() @@ -468,8 +482,12 @@ func TestImporterFailsWhenResetCannotRemoveCurrentLink(t *testing.T) { dir := t.TempDir() dbPath := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbPath, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbPath + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() diff --git a/sei-db/state_db/sc/flatkv/flatkv_test_config.go b/sei-db/state_db/sc/flatkv/flatkv_test_config.go new file mode 100644 index 0000000000..4ab1b71bfa --- /dev/null +++ b/sei-db/state_db/sc/flatkv/flatkv_test_config.go @@ -0,0 +1,47 @@ +package flatkv + +import ( + "path/filepath" + "testing" + + "github.com/sei-protocol/sei-chain/sei-db/common/unit" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" +) + +func smallTestPebbleConfig() pebbledb.PebbleDBConfig { + return pebbledb.PebbleDBConfig{ + EnableMetrics: false, + } +} + +func smallTestCacheConfig() dbcache.CacheConfig { + return dbcache.CacheConfig{ + ShardCount: 8, + MaxSize: 16 * unit.MB, + } +} + +// DefaultTestConfig returns a Config suitable for unit tests. It uses +// t.TempDir() as the DataDir root, small cache sizes, and disables metrics. 
+func DefaultTestConfig(t *testing.T) *Config { + t.Helper() + return &Config{ + DataDir: filepath.Join(t.TempDir(), flatkvRootDir), + SnapshotInterval: DefaultSnapshotInterval, + SnapshotKeepRecent: DefaultSnapshotKeepRecent, + AccountDBConfig: smallTestPebbleConfig(), + AccountCacheConfig: smallTestCacheConfig(), + CodeDBConfig: smallTestPebbleConfig(), + CodeCacheConfig: smallTestCacheConfig(), + StorageDBConfig: smallTestPebbleConfig(), + StorageCacheConfig: smallTestCacheConfig(), + LegacyDBConfig: smallTestPebbleConfig(), + LegacyCacheConfig: smallTestCacheConfig(), + MetadataDBConfig: smallTestPebbleConfig(), + MetadataCacheConfig: smallTestCacheConfig(), + ReaderThreadsPerCore: 2.0, + ReaderPoolQueueSize: 1024, + MiscPoolThreadsPerCore: 4.0, + } +} diff --git a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go index 79cb9a2e8b..3a408cfc3a 100644 --- a/sei-db/state_db/sc/flatkv/lthash_correctness_test.go +++ b/sei-db/state_db/sc/flatkv/lthash_correctness_test.go @@ -642,8 +642,11 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { dir := t.TempDir() // Phase 1: create state and close - s1 := NewCommitStore(t.Context(), dir, DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dir + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) for i := 1; i <= 10; i++ { @@ -660,7 +663,10 @@ func TestLtHashPersistenceAfterReopen(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen and verify - s2 := NewCommitStore(t.Context(), dir, DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = dir + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() diff --git a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go index 
6d22b82a16..60fe3b3af1 100644 --- a/sei-db/state_db/sc/flatkv/perdb_lthash_test.go +++ b/sei-db/state_db/sc/flatkv/perdb_lthash_test.go @@ -95,8 +95,12 @@ func TestPerDBLtHashSkewRecovery(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s1 := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) commitMixedState(t, s1, 1) @@ -110,13 +114,20 @@ func TestPerDBLtHashSkewRecovery(t *testing.T) { require.NoError(t, err) metaDBPath := filepath.Join(snapDir, metadataDir) - db, err := pebbledb.Open(t.Context(), metaDBPath, types.OpenOptions{}, false) + metaCfg := pebbledb.DefaultConfig() + metaCfg.DataDir = metaDBPath + metaCfg.EnableMetrics = false + db, err := pebbledb.Open(t.Context(), &metaCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) // Reopen -- catchup should replay version 2 from WAL - s2 := NewCommitStore(t.Context(), dbDir, DefaultConfig()) + cfg2 := DefaultTestConfig(t) + cfg2.DataDir = dbDir + + s2, err := NewCommitStore(t.Context(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -131,8 +142,12 @@ func TestPerDBLtHashPersistenceAfterReopen(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s1 := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) for i := byte(1); i <= 10; i++ { @@ -142,7 +157,11 @@ func TestPerDBLtHashPersistenceAfterReopen(t *testing.T) { require.NoError(t, s1.Close()) // Reopen 
and verify - s2 := NewCommitStore(t.Context(), dbDir, DefaultConfig()) + cfg2 := DefaultTestConfig(t) + cfg2.DataDir = dbDir + + s2, err := NewCommitStore(t.Context(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -236,8 +255,12 @@ func TestPerDBLtHashCatchupReplay(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s1 := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) commitMixedState(t, s1, 1) @@ -255,7 +278,11 @@ func TestPerDBLtHashCatchupReplay(t *testing.T) { } require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), dbDir, DefaultConfig()) + cfg2 := DefaultTestConfig(t) + cfg2.DataDir = dbDir + + s2, err := NewCommitStore(t.Context(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -297,8 +324,12 @@ func TestPerDBLtHashAfterImport(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) imp, err := s.Importer(1) @@ -333,8 +364,12 @@ func TestPerDBLtHashRollback(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitMixedState(t, s, 1) @@ -358,8 +393,12 @@ func 
TestPerDBLtHashPersistedInLocalMeta(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitMixedState(t, s, 1) @@ -388,8 +427,12 @@ func TestPerDBLtHashAfterDirectImport(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) var pairs []*iavl.KVPair diff --git a/sei-db/state_db/sc/flatkv/snapshot.go b/sei-db/state_db/sc/flatkv/snapshot.go index d95e2b6453..05f79c2e55 100644 --- a/sei-db/state_db/sc/flatkv/snapshot.go +++ b/sei-db/state_db/sc/flatkv/snapshot.go @@ -378,8 +378,10 @@ func (s *CommitStore) migrateFlatLayout(flatkvDir string) (string, error) { // Determine version for the snapshot name. The metadata DB might still // be at the flat location or might have been moved in a prior attempt. 
var version int64 - metaPath := filepath.Join(flatkvDir, metadataDir) - if tmpMeta, err := pebbledb.Open(s.ctx, metaPath, types.OpenOptions{}, s.config.EnablePebbleMetrics); err == nil { + metaCfg := s.config.MetadataDBConfig + metaCfg.DataDir = filepath.Join(flatkvDir, metadataDir) + tmpMeta, err := pebbledb.Open(s.ctx, &metaCfg) + if err == nil { verData, verErr := tmpMeta.Get(metaVersionKey) _ = tmpMeta.Close() if verErr == nil && len(verData) == 8 { diff --git a/sei-db/state_db/sc/flatkv/snapshot_test.go b/sei-db/state_db/sc/flatkv/snapshot_test.go index 4f2bfed735..2bc48bd75a 100644 --- a/sei-db/state_db/sc/flatkv/snapshot_test.go +++ b/sei-db/state_db/sc/flatkv/snapshot_test.go @@ -32,8 +32,11 @@ func commitStorageEntry(t *testing.T, s *CommitStore, addr Address, slot Slot, v func TestSnapshotCreatesDir(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -59,8 +62,11 @@ func TestSnapshotCreatesDir(t *testing.T) { func TestSnapshotIdempotent(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -79,8 +85,11 @@ func TestOpenFromSnapshot(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit v1 and v2, snapshot at v2, commit v3 - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir 
= filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x10}, Slot{0x01}, []byte{0x01}) @@ -96,7 +105,10 @@ func TestOpenFromSnapshot(t *testing.T) { require.NoError(t, s1.Close()) // Phase 2: reopen - should catchup from v2 snapshot + WAL entry for v3 - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -118,8 +130,11 @@ func TestOpenFromSnapshot(t *testing.T) { func TestCatchupUpdatesLtHash(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) // Commit 5 versions, snapshot at v2 @@ -136,7 +151,10 @@ func TestCatchupUpdatesLtHash(t *testing.T) { require.NoError(t, s1.Close()) // Reopen: catchup from v2 snapshot through v3,v4,v5 via WAL - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -148,10 +166,10 @@ func TestCatchupUpdatesLtHash(t *testing.T) { } func TestRollbackRewindsState(t *testing.T) { - dir := t.TempDir() - - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := 
NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) // Commit v1..v5, snapshot at v3 @@ -185,10 +203,10 @@ func TestRollbackRewindsState(t *testing.T) { } func TestRollbackToSnapshotExact(t *testing.T) { - dir := t.TempDir() - - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x40}, Slot{0x01}, []byte{0x01}) @@ -208,8 +226,11 @@ func TestRollbackToSnapshotExact(t *testing.T) { func TestPartialSnapshotCleanup(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x50}, Slot{0x01}, []byte{0x01}) @@ -255,7 +276,9 @@ func TestMigrationFromFlatLayout(t *testing.T) { dbPath := filepath.Join(flatkvDir, sub) require.NoError(t, os.MkdirAll(dbPath, 0750)) // Create an actual PebbleDB so Open works - db, err := pebbledb.Open(t.Context(), dbPath, types.OpenOptions{}, false) + cfg := pebbledb.DefaultTestConfig(t) + cfg.DataDir = dbPath + db, err := pebbledb.Open(t.Context(), &cfg) require.NoError(t, err) require.NoError(t, db.Close()) } @@ -265,7 +288,10 @@ func TestMigrationFromFlatLayout(t *testing.T) { require.True(t, os.IsNotExist(err)) // Open the store - should trigger migration - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + 
require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -296,8 +322,11 @@ func TestOpenVersionValidation(t *testing.T) { dir := t.TempDir() // Phase 1: create store, commit some data - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x60}, Slot{0x01}, []byte{0x11}) @@ -312,13 +341,19 @@ func TestOpenVersionValidation(t *testing.T) { require.NoError(t, err) accountDBPath := filepath.Join(snapDir, accountDBDir) - db, err := pebbledb.Open(t.Context(), accountDBPath, types.OpenOptions{}, false) + acctCfg := pebbledb.DefaultConfig() + acctCfg.DataDir = accountDBPath + acctCfg.EnableMetrics = false + db, err := pebbledb.Open(t.Context(), &acctCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) // Phase 3: reopen - should detect skew and catchup - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -387,8 +422,11 @@ func TestSeekSnapshot(t *testing.T) { func TestLoadVersionWithTarget(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) 
commitStorageEntry(t, s1, Address{0x70}, Slot{0x01}, []byte{0x01}) @@ -400,7 +438,10 @@ func TestLoadVersionWithTarget(t *testing.T) { require.NoError(t, s1.Close()) // Reopen at specific version 3 - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(3, false) require.NoError(t, err) defer s2.Close() @@ -419,8 +460,11 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) // Phase 1: build baseline at v2 and snapshot it. - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s1, addr, slot, []byte{0x01}) // v1 @@ -440,7 +484,10 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { // Phase 3: reopen exactly at v2. If later commits had mutated the snapshot // baseline in place, we'd incorrectly read 0x04 here. - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(2, false) require.NoError(t, err) gotV2, ok := s2.Get(key) @@ -449,7 +496,10 @@ func TestSnapshotThenCatchupThenVerifyCorrectness(t *testing.T) { require.NoError(t, s2.Close()) // Phase 4: reopen latest again to ensure catchup/replay still reaches v4. 
- s3 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s3, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s3.LoadVersion(0, false) require.NoError(t, err) defer s3.Close() @@ -469,8 +519,11 @@ func TestLoadVersionMixedSequence(t *testing.T) { slot := Slot{0x81} key := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, addr, slot, []byte{0x01}) @@ -484,7 +537,10 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s.Close()) // Round 1: load exactly v2 - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s1.LoadVersion(2, false) require.NoError(t, err) require.Equal(t, int64(2), s1.Version()) @@ -495,7 +551,10 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s1.Close()) // Round 2: load latest (catches up through v3, v4) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) require.Equal(t, int64(4), s2.Version()) @@ -506,7 +565,10 @@ func TestLoadVersionMixedSequence(t *testing.T) { require.NoError(t, s2.Close()) // Round 3: load v2 AGAIN — snapshot must still be clean. 
- s3 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s3, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s3.LoadVersion(2, false) require.NoError(t, err, "LoadVersion(2) must succeed after LoadVersion(0) dirtied working dir") require.Equal(t, int64(2), s3.Version()) @@ -522,8 +584,11 @@ func TestLoadVersionMixedSequence(t *testing.T) { func TestRollbackTargetBeforeWALStart(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) // Build: v1..v5, snapshot at v2 @@ -556,7 +621,10 @@ func TestRollbackTargetBeforeWALStart(t *testing.T) { // Simulate restart: should stay at v2. 
require.NoError(t, s.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -753,9 +821,12 @@ func TestCreateWorkingDirReclones(t *testing.T) { // ============================================================================= func TestPruneSnapshotsKeepsRecent(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotKeepRecent: 1}) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(t.TempDir(), flatkvRootDir) + cfg.SnapshotKeepRecent = 1 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) for i := 0; i < 5; i++ { @@ -763,9 +834,8 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) } - flatkvDir := filepath.Join(dir, flatkvRootDir) var snapshots []int64 - _ = traverseSnapshots(flatkvDir, true, func(v int64) (bool, error) { + _ = traverseSnapshots(cfg.DataDir, true, func(v int64) (bool, error) { snapshots = append(snapshots, v) return false, nil }) @@ -777,9 +847,11 @@ func TestPruneSnapshotsKeepsRecent(t *testing.T) { } func TestPruneSnapshotsKeepAll(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotKeepRecent: 100}) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.SnapshotKeepRecent = 100 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -788,9 +860,8 @@ func TestPruneSnapshotsKeepAll(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) } - flatkvDir := filepath.Join(dir, 
flatkvRootDir) var count int - _ = traverseSnapshots(flatkvDir, true, func(_ int64) (bool, error) { + _ = traverseSnapshots(cfg.DataDir, true, func(_ int64) (bool, error) { count++ return false, nil }) @@ -814,7 +885,10 @@ func TestOrphanSnapshotRecovery(t *testing.T) { _, err := os.Lstat(currentPath(flatkvDir)) require.True(t, os.IsNotExist(err), "no current symlink should exist") - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -875,9 +949,10 @@ func TestTraverseSnapshotsEarlyStop(t *testing.T) { // ============================================================================= func TestVerifyWALTailSuccess(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -889,9 +964,10 @@ func TestVerifyWALTailSuccess(t *testing.T) { } func TestVerifyWALTailMismatch(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -912,8 +988,12 @@ func TestTryTruncateWAL(t *testing.T) { // SnapshotKeepRecent=0 so pruneSnapshots removes snapshot-0 once // the manual snapshot at v5 is created; this makes v5 the earliest // snapshot and gives tryTruncateWAL a positive truncation offset. 
- s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotKeepRecent: 0}) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotKeepRecent = 0 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -936,9 +1016,10 @@ func TestTryTruncateWAL(t *testing.T) { } func TestTryTruncateWALNoSnapshot(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -958,8 +1039,11 @@ func TestTryTruncateWALNoSnapshot(t *testing.T) { func TestRollbackRemovesPostTargetSnapshots(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -1056,8 +1140,12 @@ func TestSeekSnapshotExact(t *testing.T) { func TestMultipleSnapshotsAndReopen(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotKeepRecent: 10}) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotKeepRecent = 10 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) var hashes [][]byte @@ -1070,8 +1158,12 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { for i, expectedHash := range hashes { ver := int64(i + 
1) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotKeepRecent: 10}) - _, err := s2.LoadVersion(ver, false) + cfg2 := DefaultTestConfig(t) + cfg2.DataDir = filepath.Join(dir, flatkvRootDir) + cfg2.SnapshotKeepRecent = 10 + s2, err := NewCommitStore(t.Context(), cfg2) + require.NoError(t, err) + _, err = s2.LoadVersion(ver, false) require.NoError(t, err) require.Equal(t, ver, s2.Version()) require.Equal(t, expectedHash, s2.RootHash(), "hash mismatch at version %d", ver) @@ -1085,8 +1177,11 @@ func TestMultipleSnapshotsAndReopen(t *testing.T) { func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0xF0}, Slot{0x01}, []byte{0x01}) @@ -1111,7 +1206,10 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { // Reopen: working dir should be reused (SNAPSHOT_BASE matches current), // so committedVersion should be 5 (from working dir metadata), not 2 // (from the snapshot). Catchup should replay 0 entries. 
- s2 := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1122,8 +1220,11 @@ func TestWriteSnapshotUpdatesSnapshotBase(t *testing.T) { func TestSnapshotPreservesAllKeyTypes(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xAB} @@ -1143,7 +1244,10 @@ func TestSnapshotPreservesAllKeyTypes(t *testing.T) { require.NoError(t, s.WriteSnapshot("")) require.NoError(t, s.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1175,8 +1279,11 @@ func TestReopenAfterEmptyCommits(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) for i := 0; i < 3; i++ { @@ -1189,7 +1296,10 @@ func TestReopenAfterEmptyCommits(t *testing.T) { hashBefore := s.RootHash() require.NoError(t, s.Close()) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := 
NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1206,8 +1316,11 @@ func TestReopenAfterDeletes(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xEE} @@ -1243,7 +1356,10 @@ func TestReopenAfterDeletes(t *testing.T) { hashBefore := s.RootHash() require.NoError(t, s.Close()) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1275,13 +1391,12 @@ func TestReopenAfterDeletes(t *testing.T) { // ============================================================================= func TestWALTruncationThenRollback(t *testing.T) { - dir := t.TempDir() - cfg := Config{ - SnapshotInterval: 5, - SnapshotKeepRecent: 1, - } - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 5 + cfg.SnapshotKeepRecent = 1 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) for i := 1; i <= 10; i++ { @@ -1314,15 +1429,13 @@ func TestWALTruncationThenRollback(t *testing.T) { // ============================================================================= func TestReopenAfterSnapshotAndTruncation(t *testing.T) { - dir := t.TempDir() - dbDir := filepath.Join(dir, flatkvRootDir) - cfg := Config{ - SnapshotInterval: 5, - SnapshotKeepRecent: 1, - } + cfg := DefaultTestConfig(t) + 
cfg.SnapshotInterval = 5 + cfg.SnapshotKeepRecent = 1 - s := NewCommitStore(t.Context(), dbDir, cfg) - _, err := s.LoadVersion(0, false) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) for i := 1; i <= 10; i++ { @@ -1333,7 +1446,8 @@ func TestReopenAfterSnapshotAndTruncation(t *testing.T) { hashBefore := s.RootHash() require.NoError(t, s.Close()) - s2 := NewCommitStore(context.Background(), dbDir, cfg) + s2, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1357,8 +1471,11 @@ func TestSingleDBOpenFailure(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) require.NoError(t, s.WriteSnapshot("")) @@ -1376,7 +1493,10 @@ func TestSingleDBOpenFailure(t *testing.T) { } _ = os.Remove(filepath.Join(dbDir, "working", snapshotBaseFile)) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.Error(t, err, "open should fail when storageDB is corrupted in both working and snapshot") } @@ -1389,27 +1509,39 @@ func TestGlobalMetadataCorruption(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, 
false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) require.NoError(t, s.WriteSnapshot("")) require.NoError(t, s.Close()) workingMeta := filepath.Join(dbDir, "working", metadataDir) - db, err := pebbledb.Open(context.Background(), workingMeta, types.OpenOptions{}, false) + metaCfg := pebbledb.DefaultConfig() + metaCfg.DataDir = workingMeta + metaCfg.EnableMetrics = false + db, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) snapMeta := filepath.Join(dbDir, snapshotName(1), metadataDir) - db2, err := pebbledb.Open(context.Background(), snapMeta, types.OpenOptions{}, false) + metaCfg2 := pebbledb.DefaultConfig() + metaCfg2.DataDir = snapMeta + metaCfg2.EnableMetrics = false + db2, err := pebbledb.Open(context.Background(), &metaCfg2) require.NoError(t, err) require.NoError(t, db2.Set(metaVersionKey, []byte{0xFF, 0xFF, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db2.Close()) _ = os.Remove(filepath.Join(dbDir, "working", snapshotBaseFile)) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.Error(t, err, "open should fail when global metadata is corrupted") } @@ -1422,8 +1554,11 @@ func TestWALDirectoryDeleted(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) @@ -1434,7 +1569,10 @@ func 
TestWALDirectoryDeleted(t *testing.T) { walDir := filepath.Join(dbDir, changelogDir) require.NoError(t, os.RemoveAll(walDir)) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1454,8 +1592,11 @@ func TestLocalMetaCorruption(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) require.NoError(t, s.WriteSnapshot("")) @@ -1463,14 +1604,20 @@ func TestLocalMetaCorruption(t *testing.T) { // Corrupt accountDB meta version in working dir: write 3 garbage bytes (expected 8). workingAccount := filepath.Join(dbDir, "working", accountDBDir) - db, err := pebbledb.Open(context.Background(), workingAccount, types.OpenOptions{}, false) + acctCfg := pebbledb.DefaultConfig() + acctCfg.DataDir = workingAccount + acctCfg.EnableMetrics = false + db, err := pebbledb.Open(context.Background(), &acctCfg) require.NoError(t, err) require.NoError(t, db.Set(metaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db.Close()) // Same corruption in the snapshot dir. 
snapAccount := filepath.Join(dbDir, snapshotName(1), accountDBDir) - db2, err := pebbledb.Open(context.Background(), snapAccount, types.OpenOptions{}, false) + acctCfg2 := pebbledb.DefaultConfig() + acctCfg2.DataDir = snapAccount + acctCfg2.EnableMetrics = false + db2, err := pebbledb.Open(context.Background(), &acctCfg2) require.NoError(t, err) require.NoError(t, db2.Set(metaVersionKey, []byte{0xDE, 0xAD, 0xFF}, types.WriteOptions{Sync: true})) require.NoError(t, db2.Close()) @@ -1478,7 +1625,10 @@ func TestLocalMetaCorruption(t *testing.T) { // Remove SNAPSHOT_BASE to force re-clone from corrupted snapshot. _ = os.Remove(filepath.Join(dbDir, "working", snapshotBaseFile)) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.Error(t, err, "open should fail when meta version is corrupted") require.Contains(t, err.Error(), "invalid meta version length") @@ -1492,8 +1642,11 @@ func TestWALSegmentCorruption(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) // v1 @@ -1503,7 +1656,10 @@ func TestWALSegmentCorruption(t *testing.T) { // Simulate crash between commitBatches (v2 written) and commitGlobalMetadata: // rewind global version to v1 so catchup needs to replay v2 from WAL. 
workingMeta := filepath.Join(dbDir, "working", metadataDir) - mdb, err := pebbledb.Open(context.Background(), workingMeta, types.OpenOptions{}, false) + metaCfg := pebbledb.DefaultConfig() + metaCfg.DataDir = workingMeta + metaCfg.EnableMetrics = false + mdb, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) require.NoError(t, mdb.Set(metaVersionKey, versionToBytes(1), types.WriteOptions{Sync: true})) require.NoError(t, mdb.Close()) @@ -1528,7 +1684,10 @@ func TestWALSegmentCorruption(t *testing.T) { require.Greater(t, corrupted, 0, "should have found at least one WAL segment to corrupt") // Request version 2: global says v1, WAL auto-truncated (empty), can't catchup to v2. - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(2, false) require.Error(t, err, "LoadVersion should fail: WAL corrupted, can't reach requested version") } @@ -1541,8 +1700,12 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(context.Background(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xE1} @@ -1575,7 +1738,8 @@ func TestAccountRowDeletePersistsAfterReopen(t *testing.T) { hashBefore := s.RootHash() require.NoError(t, s.Close()) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + s2, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1591,8 +1755,12 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, 
flatkvRootDir) - s := NewCommitStore(context.Background(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = dbDir + + s, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xE2} @@ -1621,15 +1789,17 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { require.NoError(t, s.Close()) // Simulate crash: rewind global version to v1 so catchup must replay v2 - workingMeta := filepath.Join(dbDir, "working", metadataDir) - mdb, err := pebbledb.Open(context.Background(), workingMeta, types.OpenOptions{}, false) + metaCfg := pebbledb.DefaultTestConfig(t) + metaCfg.DataDir = filepath.Join(dbDir, "working", metadataDir) + mdb, err := pebbledb.Open(context.Background(), &metaCfg) require.NoError(t, err) versionBuf := make([]byte, 8) versionBuf[7] = 1 // version = 1 require.NoError(t, mdb.Set(metaVersionKey, versionBuf, types.WriteOptions{Sync: true})) require.NoError(t, mdb.Close()) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + s2, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -1644,12 +1814,14 @@ func TestAccountRowDeleteSurvivesWALReplay(t *testing.T) { func TestAccountRowDeleteAfterSnapshotRollback(t *testing.T) { dir := t.TempDir() - cfg := Config{ - SnapshotInterval: 1, - SnapshotKeepRecent: 2, - } - s := NewCommitStore(context.Background(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + cfg.SnapshotInterval = 1 + cfg.SnapshotKeepRecent = 2 + + s, err := NewCommitStore(context.Background(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xE3} diff --git a/sei-db/state_db/sc/flatkv/store.go 
b/sei-db/state_db/sc/flatkv/store.go index 786e089235..2d75d827e4 100644 --- a/sei-db/state_db/sc/flatkv/store.go +++ b/sei-db/state_db/sc/flatkv/store.go @@ -7,9 +7,12 @@ import ( "io" "os" "path/filepath" + "runtime" "time" "github.com/sei-protocol/sei-chain/sei-db/common/metrics" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" + "github.com/sei-protocol/sei-chain/sei-db/db_engine/dbcache" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" seidbtypes "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -73,6 +76,7 @@ type pendingAccountWrite struct { // NOT thread-safe; callers must serialize all operations. type CommitStore struct { ctx context.Context + cancel context.CancelFunc config Config dbDir string @@ -114,8 +118,21 @@ type CommitStore struct { phaseTimer *metrics.PhaseTimer - readOnly bool // Set by readonly LoadVersion; guards all write methods. + // readOnly marks stores opened via LoadVersion(..., true). + readOnly bool + + // Temp working dir for readonly store; removed by Close. readOnlyWorkDir string // Temp working dir for readonly store; removed by Close. + + // A work pool for reading from the DBs. + // + // Uses a fixed-size pool. + readPool threading.Pool + + // A work pool for miscellaneous operations that are neither computationally intensive nor IO bound. + // + // Uses an elasticly-sized pool, so it is safe to submit tasks that have dependencies on other tasks in the pool. + miscPool threading.Pool } var _ Store = (*CommitStore)(nil) @@ -124,15 +141,31 @@ var _ Store = (*CommitStore)(nil) // Call LoadVersion to open and initialize. 
func NewCommitStore( ctx context.Context, - dbDir string, - cfg Config, -) *CommitStore { + cfg *Config, +) (*CommitStore, error) { + + cfg.InitializeDataDirectories() + + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("failed to validate config: %w", err) + } + meter := otel.Meter(flatkvMeterName) + ctx, cancel := context.WithCancel(ctx) + + coreCount := runtime.NumCPU() + + readPoolSize := int(cfg.ReaderThreadsPerCore*float64(coreCount) + float64(cfg.ReaderConstantThreadCount)) + readPool := threading.NewFixedPool(ctx, "flatkv-read", readPoolSize, cfg.ReaderPoolQueueSize) + + miscPoolSize := int(cfg.MiscPoolThreadsPerCore*float64(coreCount) + float64(cfg.MiscConstantThreadCount)) + miscPool := threading.NewElasticPool(ctx, "flatkv-misc", miscPoolSize) + return &CommitStore{ ctx: ctx, - config: cfg, - dbDir: dbDir, + cancel: cancel, + config: *cfg, localMeta: make(map[string]*LocalMeta), accountWrites: make(map[string]*pendingAccountWrite), codeWrites: make(map[string]*pendingKVWrite), @@ -141,13 +174,28 @@ func NewCommitStore( pendingChangeSets: make([]*proto.NamedChangeSet, 0), committedLtHash: lthash.New(), workingLtHash: lthash.New(), - perDBWorkingLtHash: newPerDBLtHashMap(), + perDBWorkingLtHash: make(map[string]*lthash.LtHash), phaseTimer: metrics.NewPhaseTimer(meter, "seidb_main_thread"), - } + readPool: readPool, + miscPool: miscPool, + }, nil +} + +// resetPools recreates the context and thread pools after a full Close(). 
+func (s *CommitStore) resetPools() { + coreCount := runtime.NumCPU() + + s.ctx, s.cancel = context.WithCancel(context.Background()) + + readPoolSize := int(s.config.ReaderThreadsPerCore*float64(coreCount) + float64(s.config.ReaderConstantThreadCount)) + s.readPool = threading.NewFixedPool(s.ctx, "flatkv-read", readPoolSize, s.config.ReaderPoolQueueSize) + + miscPoolSize := int(s.config.MiscPoolThreadsPerCore*float64(coreCount) + float64(s.config.MiscConstantThreadCount)) + s.miscPool = threading.NewElasticPool(s.ctx, "flatkv-misc", miscPoolSize) } func (s *CommitStore) flatkvDir() string { - return s.dbDir + return s.config.DataDir } var errReadOnly = errors.New("flatkv: store is read-only") @@ -218,7 +266,11 @@ func (s *CommitStore) LoadVersion(targetVersion int64, readOnly bool) (_ Store, // loadVersionReadOnly creates an isolated, read-only CommitStore at the // requested version. func (s *CommitStore) loadVersionReadOnly(targetVersion int64) (_ Store, retErr error) { - ro := NewCommitStore(s.ctx, s.dbDir, s.config) + roCfg := s.config.Copy() + ro, err := NewCommitStore(s.ctx, roCfg) + if err != nil { + return nil, fmt.Errorf("failed to create readonly store: %w", err) + } workDir, err := os.MkdirTemp(ro.flatkvDir(), readOnlyDirPrefix) if err != nil { @@ -226,6 +278,12 @@ func (s *CommitStore) loadVersionReadOnly(targetVersion int64) (_ Store, retErr } ro.readOnlyWorkDir = workDir + ro.config.AccountDBConfig.DataDir = filepath.Join(workDir, accountDBDir) + ro.config.CodeDBConfig.DataDir = filepath.Join(workDir, codeDBDir) + ro.config.StorageDBConfig.DataDir = filepath.Join(workDir, storageDBDir) + ro.config.LegacyDBConfig.DataDir = filepath.Join(workDir, legacyDBDir) + ro.config.MetadataDBConfig.DataDir = filepath.Join(workDir, metadataDir) + defer func() { if retErr != nil { if closeErr := ro.Close(); closeErr != nil { @@ -394,26 +452,21 @@ func (s *CommitStore) acquireFileLock(dir string) error { return nil } -// openDBs opens all PebbleDBs from dbDir and 
optionally the changelog WAL -// from changelogRoot. On failure all already-opened handles are closed. -func (s *CommitStore) openDBs(dbDir, changelogRoot string) (retErr error) { - type namedPath struct { - name string - path string +// openPebbleDB creates the directory at cfg.DataDir and opens a PebbleDB instance. +func (s *CommitStore) openPebbleDB(cfg *pebbledb.PebbleDBConfig, cacheCfg *dbcache.CacheConfig) (seidbtypes.KeyValueDB, error) { + if err := os.MkdirAll(cfg.DataDir, 0750); err != nil { + return nil, fmt.Errorf("create directory %s: %w", cfg.DataDir, err) } - dbPaths := []namedPath{ - {accountDBDir, filepath.Join(dbDir, accountDBDir)}, - {codeDBDir, filepath.Join(dbDir, codeDBDir)}, - {storageDBDir, filepath.Join(dbDir, storageDBDir)}, - {legacyDBDir, filepath.Join(dbDir, legacyDBDir)}, - {metadataDir, filepath.Join(dbDir, metadataDir)}, + db, err := pebbledb.OpenWithCache(s.ctx, cfg, cacheCfg, s.readPool, s.miscPool) + if err != nil { + return nil, fmt.Errorf("open %s: %w", cfg.DataDir, err) } + return db, nil +} - for _, np := range dbPaths { - if err := os.MkdirAll(np.path, 0750); err != nil { - return fmt.Errorf("failed to create directory %s: %w", np.path, err) - } - } +// openDBs opens all PebbleDBs from dbDir and optionally the changelog WAL +// from changelogRoot. On failure all already-opened handles are closed. 
+func (s *CommitStore) openDBs(dbDir, changelogRoot string) (retErr error) { var toClose []io.Closer defer func() { @@ -431,31 +484,36 @@ func (s *CommitStore) openDBs(dbDir, changelogRoot string) (retErr error) { } }() - openDB := func(np namedPath) (seidbtypes.KeyValueDB, error) { - db, err := pebbledb.Open(s.ctx, np.path, seidbtypes.OpenOptions{}, s.config.EnablePebbleMetrics) - if err != nil { - return nil, fmt.Errorf("failed to open %s: %w", np.name, err) - } - toClose = append(toClose, db) - return db, nil - } - var err error - if s.accountDB, err = openDB(dbPaths[0]); err != nil { - return err + s.accountDB, err = s.openPebbleDB(&s.config.AccountDBConfig, &s.config.AccountCacheConfig) + if err != nil { + return fmt.Errorf("failed to open account DB: %w", err) } - if s.codeDB, err = openDB(dbPaths[1]); err != nil { - return err + toClose = append(toClose, s.accountDB) + + s.codeDB, err = s.openPebbleDB(&s.config.CodeDBConfig, &s.config.CodeCacheConfig) + if err != nil { + return fmt.Errorf("failed to open code DB: %w", err) } - if s.storageDB, err = openDB(dbPaths[2]); err != nil { - return err + toClose = append(toClose, s.codeDB) + + s.storageDB, err = s.openPebbleDB(&s.config.StorageDBConfig, &s.config.StorageCacheConfig) + if err != nil { + return fmt.Errorf("failed to open storage DB: %w", err) } - if s.legacyDB, err = openDB(dbPaths[3]); err != nil { - return err + toClose = append(toClose, s.storageDB) + + s.legacyDB, err = s.openPebbleDB(&s.config.LegacyDBConfig, &s.config.LegacyCacheConfig) + if err != nil { + return fmt.Errorf("failed to open legacy DB: %w", err) } - if s.metadataDB, err = openDB(dbPaths[4]); err != nil { - return err + toClose = append(toClose, s.legacyDB) + + s.metadataDB, err = s.openPebbleDB(&s.config.MetadataDBConfig, &s.config.MetadataCacheConfig) + if err != nil { + return fmt.Errorf("failed to open metadata DB: %w", err) } + toClose = append(toClose, s.metadataDB) if changelogRoot != "" { changelogPath := 
filepath.Join(changelogRoot, changelogDir) @@ -571,6 +629,17 @@ func (s *CommitStore) Importer(version int64) (types.Importer, error) { if s.readOnly { return nil, errReadOnly } + // rootmulti.Restore closes the store before creating an importer. + // Close() cancels the context (killing pools), so recreate them + // before reopening the DBs. + if s.isClosed() { + if s.ctx.Err() != nil { + s.resetPools() + } + if err := s.open(); err != nil { + return nil, fmt.Errorf("reopen store for import: %w", err) + } + } if err := s.resetForImport(); err != nil { return nil, fmt.Errorf("reset store for import: %w", err) } diff --git a/sei-db/state_db/sc/flatkv/store_lifecycle.go b/sei-db/state_db/sc/flatkv/store_lifecycle.go index c91731c450..9bef4159a6 100644 --- a/sei-db/state_db/sc/flatkv/store_lifecycle.go +++ b/sei-db/state_db/sc/flatkv/store_lifecycle.go @@ -69,9 +69,12 @@ func (s *CommitStore) closeDBsOnly() error { return nil } -// Close closes all database instances and releases the file lock. +// Close closes all database instances, cancels the store's context to +// stop background goroutines (pools, caches, metrics), and releases the +// file lock. 
func (s *CommitStore) Close() error { err := s.closeDBsOnly() + s.cancel() if s.fileLock != nil { if lockErr := s.fileLock.Unlock(); lockErr != nil { diff --git a/sei-db/state_db/sc/flatkv/store_meta_test.go b/sei-db/state_db/sc/flatkv/store_meta_test.go index 91597da338..840f9bc968 100644 --- a/sei-db/state_db/sc/flatkv/store_meta_test.go +++ b/sei-db/state_db/sc/flatkv/store_meta_test.go @@ -156,8 +156,11 @@ func TestGlobalMetadataPersistence(t *testing.T) { dir := t.TempDir() dbDir := filepath.Join(dir, flatkvRootDir) - s := NewCommitStore(t.Context(), dbDir, DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultConfig() + cfg.DataDir = dbDir + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) @@ -174,7 +177,10 @@ func TestGlobalMetadataPersistence(t *testing.T) { expectedHash := s.committedLtHash.Checksum() require.NoError(t, s.Close()) - s2 := NewCommitStore(context.Background(), dbDir, DefaultConfig()) + cfg2 := DefaultConfig() + cfg2.DataDir = dbDir + s2, err := NewCommitStore(context.Background(), cfg2) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() diff --git a/sei-db/state_db/sc/flatkv/store_test.go b/sei-db/state_db/sc/flatkv/store_test.go index 4ca23464e1..a5e1f5599e 100644 --- a/sei-db/state_db/sc/flatkv/store_test.go +++ b/sei-db/state_db/sc/flatkv/store_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/sei-protocol/sei-chain/sei-db/common/evm" + "github.com/sei-protocol/sei-chain/sei-db/common/threading" "github.com/sei-protocol/sei-chain/sei-db/db_engine/pebbledb" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -57,8 +58,10 @@ func makeChangeSet(key, value []byte, delete bool) *proto.NamedChangeSet { // setupTestDB creates a temporary PebbleDB 
for testing func setupTestDB(t *testing.T) types.KeyValueDB { t.Helper() - dir := t.TempDir() - db, err := pebbledb.Open(t.Context(), dir, types.OpenOptions{}, false) + cfg := pebbledb.DefaultTestConfig(t) + cacheCfg := pebbledb.DefaultTestCacheConfig() + db, err := pebbledb.OpenWithCache(t.Context(), &cfg, &cacheCfg, + threading.NewAdHocPool(), threading.NewAdHocPool()) require.NoError(t, err) return db } @@ -66,19 +69,21 @@ func setupTestDB(t *testing.T) types.KeyValueDB { // setupTestStore creates a minimal test store func setupTestStore(t *testing.T) *CommitStore { t.Helper() - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + s, err := NewCommitStore(t.Context(), DefaultTestConfig(t)) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) return s } // setupTestStoreWithConfig creates a test store with custom config -func setupTestStoreWithConfig(t *testing.T, cfg Config) *CommitStore { +func setupTestStoreWithConfig(t *testing.T, cfg *Config) *CommitStore { t.Helper() dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) return s } @@ -96,18 +101,20 @@ func commitAndCheck(t *testing.T, s *CommitStore) int64 { // ============================================================================= func TestStoreOpenClose(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) require.NoError(t, s.Close()) } func TestStoreClose(t 
*testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) // Close should succeed @@ -301,8 +308,11 @@ func TestStorePersistence(t *testing.T) { key := memiavlStorageKey(addr, slot) // Write and close - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) cs := makeChangeSet(key, value, false) @@ -311,7 +321,10 @@ func TestStorePersistence(t *testing.T) { require.NoError(t, s1.Close()) // Reopen and verify - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -428,11 +441,17 @@ func TestStoreRollbackNoSnapshot(t *testing.T) { func TestFileLockPreventsDoubleOpen(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = 
s2.LoadVersion(0, false) require.Error(t, err, "second open on same dir should fail due to file lock") require.Contains(t, err.Error(), "file lock") @@ -449,9 +468,10 @@ func TestFileLockPreventsDoubleOpen(t *testing.T) { // ============================================================================= func TestClearChangelog(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -474,9 +494,10 @@ func TestClearChangelog(t *testing.T) { // ============================================================================= func TestCloseDBsOnlyIdempotent(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) require.NoError(t, s.closeDBsOnly()) @@ -493,8 +514,11 @@ func TestCloseDBsOnlyIdempotent(t *testing.T) { func TestLoadVersionTargetBeyondWALFails(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s1, Address{0x01}, Slot{0x01}, []byte{0x01}) @@ -502,7 +526,10 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = 
DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(100, false) require.Error(t, err, "loading version beyond WAL should fail") } @@ -514,8 +541,11 @@ func TestLoadVersionTargetBeyondWALFails(t *testing.T) { func TestReopenReusesWorkingDir(t *testing.T) { dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0x01}) @@ -527,7 +557,10 @@ func TestReopenReusesWorkingDir(t *testing.T) { _, err = os.Stat(basePath) require.NoError(t, err, "SNAPSHOT_BASE should exist after close") - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -540,9 +573,10 @@ func TestReopenReusesWorkingDir(t *testing.T) { // ============================================================================= func TestWalOffsetForVersionFastPath(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -562,9 +596,10 @@ func TestWalOffsetForVersionFastPath(t *testing.T) { } func TestWalOffsetForVersionBeforeWAL(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, 
flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -578,9 +613,10 @@ func TestWalOffsetForVersionBeforeWAL(t *testing.T) { } func TestWalOffsetForVersionNotFound(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -597,8 +633,11 @@ func TestWalOffsetForVersionNotFound(t *testing.T) { func TestCatchupFromSpecificVersion(t *testing.T) { dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) for i := 0; i < 10; i++ { @@ -609,7 +648,10 @@ func TestCatchupFromSpecificVersion(t *testing.T) { require.NoError(t, s1.WriteSnapshot("")) require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -658,8 +700,11 @@ func TestPersistenceAllKeyTypes(t *testing.T) { addr := Address{0xAA} slot := Slot{0xBB} - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s1.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s1, err := 
NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s1.LoadVersion(0, false) require.NoError(t, err) storageKey := evm.BuildMemIAVLEVMKey(evm.EVMKeyStorage, StorageKey(addr, slot)) @@ -677,7 +722,10 @@ func TestPersistenceAllKeyTypes(t *testing.T) { hash := s1.RootHash() require.NoError(t, s1.Close()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg = DefaultTestConfig(t) + cfg.DataDir = filepath.Join(dir, flatkvRootDir) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) _, err = s2.LoadVersion(0, false) require.NoError(t, err) defer s2.Close() @@ -703,9 +751,9 @@ func TestPersistenceAllKeyTypes(t *testing.T) { // ============================================================================= func TestReadOnlyBasicLoadAndRead(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + s, err := NewCommitStore(t.Context(), DefaultTestConfig(t)) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xAA} @@ -729,9 +777,10 @@ func TestReadOnlyBasicLoadAndRead(t *testing.T) { } func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { - dir := t.TempDir() - writer := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := writer.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + writer, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = writer.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xCC} @@ -743,7 +792,8 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { commitAndCheck(t, writer) require.NoError(t, writer.Close()) - fresh := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + fresh, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) ro, err := fresh.LoadVersion(0, true) require.NoError(t, err) defer 
ro.Close() @@ -755,9 +805,10 @@ func TestReadOnlyLoadFromUnopenedStore(t *testing.T) { } func TestReadOnlyAtSpecificVersion(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotInterval: 2, SnapshotKeepRecent: 10}) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0x11} @@ -782,9 +833,10 @@ func TestReadOnlyAtSpecificVersion(t *testing.T) { } func TestReadOnlyWriteGuards(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xAA} @@ -810,9 +862,10 @@ func TestReadOnlyWriteGuards(t *testing.T) { } func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xAA} @@ -839,9 +892,12 @@ func TestReadOnlyParentWritesDuringReadOnly(t *testing.T) { } func TestReadOnlyConcurrentInstances(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{SnapshotInterval: 2, SnapshotKeepRecent: 10}) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 2 + cfg.SnapshotKeepRecent = 10 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0x11} @@ -875,9 +931,10 @@ func 
TestReadOnlyConcurrentInstances(t *testing.T) { } func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xAA} @@ -900,9 +957,10 @@ func TestReadOnlyFailureDoesNotAffectParent(t *testing.T) { } func TestReadOnlyCloseRemovesTempDir(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) addr := Address{0xAA} @@ -923,8 +981,9 @@ func TestReadOnlyCloseRemovesTempDir(t *testing.T) { } func TestCleanupOrphanedReadOnlyDirs(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) defer func() { require.NoError(t, s.Close()) }() fkvDir := s.flatkvDir() @@ -941,20 +1000,22 @@ func TestCleanupOrphanedReadOnlyDirs(t *testing.T) { require.NoDirExists(t, orphan1) require.NoDirExists(t, orphan2) - _, err := s.LoadVersion(0, false) + _, err = s.LoadVersion(0, false) require.NoError(t, err) } func TestCleanupOrphanedReadOnlyDirsHoldsWriterLock(t *testing.T) { - dir := t.TempDir() - s1 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) + cfg := DefaultTestConfig(t) + s1, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) defer func() { require.NoError(t, s1.Close()) }() require.NoError(t, s1.CleanupOrphanedReadOnlyDirs()) - s2 := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), 
DefaultConfig()) + s2, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) defer func() { require.NoError(t, s2.Close()) }() - err := s2.CleanupOrphanedReadOnlyDirs() + err = s2.CleanupOrphanedReadOnlyDirs() require.Error(t, err) require.ErrorContains(t, err, "acquire file lock") } diff --git a/sei-db/state_db/sc/flatkv/store_write.go b/sei-db/state_db/sc/flatkv/store_write.go index 4011bd9c96..c201391f37 100644 --- a/sei-db/state_db/sc/flatkv/store_write.go +++ b/sei-db/state_db/sc/flatkv/store_write.go @@ -2,10 +2,10 @@ package flatkv import ( "encoding/binary" + "errors" "fmt" "sync" - errorutils "github.com/sei-protocol/sei-chain/sei-db/common/errors" "github.com/sei-protocol/sei-chain/sei-db/common/evm" "github.com/sei-protocol/sei-chain/sei-db/db_engine/types" "github.com/sei-protocol/sei-chain/sei-db/proto" @@ -24,6 +24,14 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return errReadOnly } + s.phaseTimer.SetPhase("apply_change_sets_batch_read") + + // Batch read all old values from DBs in parallel. + storageOld, accountOld, codeOld, legacyOld, err := s.batchReadOldValues(cs) + if err != nil { + return fmt.Errorf("failed to batch read old values: %w", err) + } + s.phaseTimer.SetPhase("apply_change_sets_prepare") s.pendingChangeSets = append(s.pendingChangeSets, cs...) 
@@ -54,24 +62,22 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { // Route to appropriate DB based on key type switch kind { case evm.EVMKeyStorage: - // Get old value for LtHash - oldValue, err := s.getStorageValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get storage value: %w", err) - } - // Storage: keyBytes = addr(20) || slot(32) keyStr := string(keyBytes) + oldValue := storageOld[keyStr].Value + if pair.Delete { s.storageWrites[keyStr] = &pendingKVWrite{ key: keyBytes, isDelete: true, } + storageOld[keyStr] = types.BatchGetResult{Value: nil} } else { s.storageWrites[keyStr] = &pendingKVWrite{ key: keyBytes, value: pair.Value, } + storageOld[keyStr] = types.BatchGetResult{Value: pair.Value} } // LtHash pair: internal key directly @@ -89,6 +95,7 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { return fmt.Errorf("invalid address length %d for key kind %d", len(keyBytes), kind) } addrStr := string(addr[:]) + addrKey := string(AccountKey(addr)) if _, seen := oldAccountRawValues[addrStr]; !seen { if paw, ok := s.accountWrites[addrStr]; ok { @@ -97,23 +104,23 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } else { oldAccountRawValues[addrStr] = paw.value.Encode() } + } else if result, ok := accountOld[addrKey]; ok { + oldAccountRawValues[addrStr] = result.Value } else { - rawBytes, err := s.accountDB.Get(AccountKey(addr)) - if err != nil { - if !errorutils.IsNotFound(err) { - return fmt.Errorf("accountDB I/O error for addr %x: %w", addr, err) - } - oldAccountRawValues[addrStr] = nil - } else { - oldAccountRawValues[addrStr] = rawBytes - } + oldAccountRawValues[addrStr] = nil } } + paw := s.accountWrites[addrStr] if paw == nil { - existingValue, err := s.getAccountValue(addr) - if err != nil { - return fmt.Errorf("failed to load existing account value: %w", err) + var existingValue AccountValue + result := accountOld[addrKey] + if result.IsFound() && result.Value != 
nil { + av, err := DecodeAccountValue(result.Value) + if err != nil { + return fmt.Errorf("corrupted AccountValue for addr %x: %w", addr, err) + } + existingValue = av } paw = &pendingAccountWrite{ addr: addr, @@ -132,12 +139,14 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } else { if kind == evm.EVMKeyNonce { if len(pair.Value) != NonceLen { - return fmt.Errorf("invalid nonce value length: got %d, expected %d", len(pair.Value), NonceLen) + return fmt.Errorf("invalid nonce value length: got %d, expected %d", + len(pair.Value), NonceLen) } paw.value.Nonce = binary.BigEndian.Uint64(pair.Value) } else { if len(pair.Value) != CodeHashLen { - return fmt.Errorf("invalid codehash value length: got %d, expected %d", len(pair.Value), CodeHashLen) + return fmt.Errorf("invalid codehash value length: got %d, expected %d", + len(pair.Value), CodeHashLen) } copy(paw.value.CodeHash[:], pair.Value) } @@ -145,24 +154,22 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { } case evm.EVMKeyCode: - // Get old value for LtHash - oldValue, err := s.getCodeValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get code value: %w", err) - } - // Code: keyBytes = addr(20) - per x/evm/types/keys.go keyStr := string(keyBytes) + oldValue := codeOld[keyStr].Value + if pair.Delete { s.codeWrites[keyStr] = &pendingKVWrite{ key: keyBytes, isDelete: true, } + codeOld[keyStr] = types.BatchGetResult{Value: nil} } else { s.codeWrites[keyStr] = &pendingKVWrite{ key: keyBytes, value: pair.Value, } + codeOld[keyStr] = types.BatchGetResult{Value: pair.Value} } // LtHash pair: internal key directly @@ -174,22 +181,21 @@ func (s *CommitStore) ApplyChangeSets(cs []*proto.NamedChangeSet) error { }) case evm.EVMKeyLegacy: - oldValue, err := s.getLegacyValue(keyBytes) - if err != nil { - return fmt.Errorf("failed to get legacy value: %w", err) - } - keyStr := string(keyBytes) + oldValue := legacyOld[keyStr].Value + if pair.Delete { 
s.legacyWrites[keyStr] = &pendingKVWrite{ key: keyBytes, isDelete: true, } + legacyOld[keyStr] = types.BatchGetResult{Value: nil} } else { s.legacyWrites[keyStr] = &pendingKVWrite{ key: keyBytes, value: pair.Value, } + legacyOld[keyStr] = types.BatchGetResult{Value: pair.Value} } legacyPairs = append(legacyPairs, lthash.KVPairWithLastValue{ @@ -319,10 +325,13 @@ func (s *CommitStore) flushAllDBs() error { var wg sync.WaitGroup wg.Add(4) for i, db := range []types.KeyValueDB{s.accountDB, s.codeDB, s.storageDB, s.legacyDB} { - go func(idx int, db types.KeyValueDB) { - defer wg.Done() - errs[idx] = db.Flush() - }(i, db) + err := s.miscPool.Submit(s.ctx, func() { + errs[i] = db.Flush() + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit flush: %w", err) + } } wg.Wait() names := [4]string{"accountDB", "codeDB", "storageDB", "legacyDB"} @@ -430,10 +439,13 @@ func (s *CommitStore) commitBatches(version int64) error { var wg sync.WaitGroup wg.Add(len(pending)) for i, p := range pending { - go func(idx int, b types.Batch) { - defer wg.Done() - errs[idx] = b.Commit(syncOpt) - }(i, p.batch) + err := s.miscPool.Submit(s.ctx, func() { + errs[i] = p.batch.Commit(syncOpt) + wg.Done() + }) + if err != nil { + return fmt.Errorf("failed to submit commit: %w", err) + } } wg.Wait() @@ -452,3 +464,168 @@ func (s *CommitStore) commitBatches(version int64) error { } return nil } + +// batchReadOldValues scans all changeset pairs and returns one result map per +// DB containing the "old value" for each key. Keys that already have uncommitted +// pending writes (from a prior ApplyChangeSets call in the same block) are +// resolved from those pending writes directly and excluded from the DB batch +// read, avoiding unnecessary I/O and cache pollution. 
+func (s *CommitStore) batchReadOldValues(cs []*proto.NamedChangeSet) ( + storageOld map[string]types.BatchGetResult, + accountOld map[string]types.BatchGetResult, + codeOld map[string]types.BatchGetResult, + legacyOld map[string]types.BatchGetResult, + err error, +) { + storageOld = make(map[string]types.BatchGetResult) + accountOld = make(map[string]types.BatchGetResult) + codeOld = make(map[string]types.BatchGetResult) + legacyOld = make(map[string]types.BatchGetResult) + + // Separate maps for keys that need a DB read (no pending write). + storageBatch := make(map[string]types.BatchGetResult) + accountBatch := make(map[string]types.BatchGetResult) + codeBatch := make(map[string]types.BatchGetResult) + legacyBatch := make(map[string]types.BatchGetResult) + + pendingKVResult := func(pw *pendingKVWrite) types.BatchGetResult { + if pw.isDelete { + return types.BatchGetResult{Value: nil} + } + return types.BatchGetResult{Value: pw.value} + } + + // Partition changeset keys: resolve from pending writes when available + // (prior ApplyChangeSets call in the same block), otherwise queue for + // a DB batch read. 
+ for _, namedCS := range cs { + if namedCS.Changeset.Pairs == nil { + continue + } + for _, pair := range namedCS.Changeset.Pairs { + kind, keyBytes := evm.ParseEVMKey(pair.Key) + switch kind { + case evm.EVMKeyStorage: + k := string(keyBytes) + if _, done := storageOld[k]; done { + continue + } + if pw, ok := s.storageWrites[k]; ok { + storageOld[k] = pendingKVResult(pw) + } else { + storageBatch[k] = types.BatchGetResult{} + } + + case evm.EVMKeyNonce, evm.EVMKeyCodeHash: + addr, ok := AddressFromBytes(keyBytes) + if !ok { + continue + } + k := string(AccountKey(addr)) + if _, done := accountOld[k]; done { + continue + } + if paw, ok := s.accountWrites[k]; ok { + accountOld[k] = types.BatchGetResult{Value: EncodeAccountValue(paw.value)} + } else { + accountBatch[k] = types.BatchGetResult{} + } + + case evm.EVMKeyCode: + k := string(keyBytes) + if _, done := codeOld[k]; done { + continue + } + if pw, ok := s.codeWrites[k]; ok { + codeOld[k] = pendingKVResult(pw) + } else { + codeBatch[k] = types.BatchGetResult{} + } + + case evm.EVMKeyLegacy: + k := string(keyBytes) + if _, done := legacyOld[k]; done { + continue + } + if pw, ok := s.legacyWrites[k]; ok { + legacyOld[k] = pendingKVResult(pw) + } else { + legacyBatch[k] = types.BatchGetResult{} + } + } + } + } + + // Issue parallel BatchGet calls only for keys that need a DB read. 
+ var wg sync.WaitGroup + var storageErr, accountErr, codeErr, legacyErr error + + if len(storageBatch) > 0 { + wg.Add(1) + err = s.miscPool.Submit(s.ctx, func() { + defer wg.Done() + storageErr = s.storageDB.BatchGet(storageBatch) + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } + } + + if len(accountBatch) > 0 { + wg.Add(1) + err = s.miscPool.Submit(s.ctx, func() { + defer wg.Done() + accountErr = s.accountDB.BatchGet(accountBatch) + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } + } + + if len(codeBatch) > 0 { + wg.Add(1) + err = s.miscPool.Submit(s.ctx, func() { + defer wg.Done() + codeErr = s.codeDB.BatchGet(codeBatch) + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } + } + + if len(legacyBatch) > 0 { + wg.Add(1) + err = s.miscPool.Submit(s.ctx, func() { + defer wg.Done() + legacyErr = s.legacyDB.BatchGet(legacyBatch) + }) + if err != nil { + err = fmt.Errorf("failed to submit batch get: %w", err) + return + } + } + + wg.Wait() + if err = errors.Join(storageErr, accountErr, codeErr, legacyErr); err != nil { + return + } + + // Merge DB results into the result maps. 
+ for k, v := range storageBatch { + storageOld[k] = v + } + for k, v := range accountBatch { + accountOld[k] = v + } + for k, v := range codeBatch { + codeOld[k] = v + } + for k, v := range legacyBatch { + legacyOld[k] = v + } + + return +} diff --git a/sei-db/state_db/sc/flatkv/store_write_test.go b/sei-db/state_db/sc/flatkv/store_write_test.go index 27bc70c896..0b9da7700f 100644 --- a/sei-db/state_db/sc/flatkv/store_write_test.go +++ b/sei-db/state_db/sc/flatkv/store_write_test.go @@ -2,7 +2,6 @@ package flatkv import ( "encoding/binary" - "path/filepath" "testing" "time" @@ -508,9 +507,10 @@ func TestStoreLegacyEmptyCommitLocalMeta(t *testing.T) { func TestStoreFsyncConfig(t *testing.T) { t.Run("DefaultConfig", func(t *testing.T) { - dir := t.TempDir() - store := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := store.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + store, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = store.LoadVersion(0, false) require.NoError(t, err) defer store.Close() @@ -520,11 +520,11 @@ func TestStoreFsyncConfig(t *testing.T) { }) t.Run("FsyncDisabled", func(t *testing.T) { - dir := t.TempDir() - store := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), Config{ - Fsync: false, - }) - _, err := store.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.Fsync = false + store, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = store.LoadVersion(0, false) require.NoError(t, err) defer store.Close() @@ -552,13 +552,12 @@ func TestStoreFsyncConfig(t *testing.T) { // ============================================================================= func TestAutoSnapshotTriggeredByInterval(t *testing.T) { - dir := t.TempDir() - cfg := Config{ - SnapshotInterval: 5, - SnapshotKeepRecent: 2, - } - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg := 
DefaultTestConfig(t) + cfg.SnapshotInterval = 5 + cfg.SnapshotKeepRecent = 2 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -576,13 +575,12 @@ func TestAutoSnapshotTriggeredByInterval(t *testing.T) { } func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { - dir := t.TempDir() - cfg := Config{ - SnapshotInterval: 10, - SnapshotKeepRecent: 2, - } - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 10 + cfg.SnapshotKeepRecent = 2 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -606,10 +604,11 @@ func TestAutoSnapshotNotTriggeredBeforeInterval(t *testing.T) { } func TestAutoSnapshotDisabledWhenIntervalZero(t *testing.T) { - dir := t.TempDir() - cfg := Config{SnapshotInterval: 0} - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.SnapshotInterval = 0 + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -699,9 +698,10 @@ func TestMultipleApplyAccountFieldsPreservesOther(t *testing.T) { func TestLtHashDeterministicAcrossReopen(t *testing.T) { writeAndGetHash := func() []byte { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) @@ -822,10 +822,11 @@ func TestEmptyCommitAdvancesVersion(t *testing.T) { // 
============================================================================= func TestStoreFsyncEnabled(t *testing.T) { - dir := t.TempDir() - cfg := Config{Fsync: true} - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), cfg) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + cfg.Fsync = true + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -844,9 +845,10 @@ func TestStoreFsyncEnabled(t *testing.T) { // ============================================================================= func TestLastSnapshotTimeUpdated(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) defer s.Close() @@ -864,9 +866,10 @@ func TestLastSnapshotTimeUpdated(t *testing.T) { // ============================================================================= func TestWALRecordsChangesets(t *testing.T) { - dir := t.TempDir() - s := NewCommitStore(t.Context(), filepath.Join(dir, flatkvRootDir), DefaultConfig()) - _, err := s.LoadVersion(0, false) + cfg := DefaultTestConfig(t) + s, err := NewCommitStore(t.Context(), cfg) + require.NoError(t, err) + _, err = s.LoadVersion(0, false) require.NoError(t, err) commitStorageEntry(t, s, Address{0x01}, Slot{0x01}, []byte{0xAA}) @@ -1435,6 +1438,38 @@ func TestAccountRowGCWriteZeroOrderIndependent(t *testing.T) { // Write Test Helpers // ============================================================================= +// TestLtHashExistingAccountNonceUpdate is a focused regression test for the +// oldAccountRawValues bug: when an account already exists in the DB and a new +// block updates its nonce (the most common case — every tx increments sender +// nonce), the 
LtHash delta must MixOut the old encoded AccountValue before +// MixIn'ing the new one. The bug sets oldAccountRawValues[addr] = nil instead +// of the DB value when s.accountWrites has no pending entry, causing the +// MixOut to be skipped and the LtHash to diverge from ground truth. +func TestLtHashExistingAccountNonceUpdate(t *testing.T) { + s := setupTestStore(t) + defer s.Close() + + addr := addrN(0xE1) + + // Block 1: create account with nonce=1 (new account — oldAccountRawValues + // correctly nil here since nothing exists in DB). + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ + namedCS(noncePair(addr, 1)), + })) + commitAndCheck(t, s) + verifyLtHashAtHeight(t, s, 1) // should pass: new account, nil old is correct + + // Block 2: update nonce to 2. The account now EXISTS in accountDB with + // encoded(nonce=1). The buggy code sets oldAccountRawValues[addr] = nil + // because s.accountWrites is empty after the block-1 commit cleared it. + // The correct old value is the DB's encoded(nonce=1). + require.NoError(t, s.ApplyChangeSets([]*proto.NamedChangeSet{ + namedCS(noncePair(addr, 2)), + })) + commitAndCheck(t, s) + verifyLtHashAtHeight(t, s, 2) // FAILS: incremental skipped MixOut of old value +} + func countLiveEntries(t *testing.T, db types.KeyValueDB) int { t.Helper() iter, err := db.NewIter(&types.IterOptions{})