mirror of https://github.com/pese-git/cherrypick.git
synced 2026-01-24 05:25:19 +00:00
refactor: rename benchmark_cherrypick to benchmark_di, update paths, pubspec, imports, and documentation
24  benchmark_di/lib/cli/report/csv_report.dart  Normal file
@@ -0,0 +1,24 @@
import 'report_generator.dart';

/// Generates a CSV-formatted report for benchmark results.
class CsvReport extends ReportGenerator {
  /// List of all keys/columns to include in the CSV output.
  @override
  final List<String> keys = [
    'benchmark', 'chainCount', 'nestingDepth', 'mean_us', 'median_us', 'stddev_us',
    'min_us', 'max_us', 'trials', 'timings_us', 'memory_diff_kb', 'delta_peak_kb', 'peak_rss_kb'
  ];

  /// Renders rows as a CSV table string.
  @override
  String render(List<Map<String, dynamic>> rows) {
    final header = keys.join(',');
    final lines = rows.map((r) =>
      keys.map((k) {
        final v = r[k];
        if (v is List) return '"${v.join(';')}"';
        return (v ?? '').toString();
      }).join(',')
    ).toList();
    return ([header] + lines).join('\n');
  }
}
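For orientation, a minimal usage sketch (not part of this commit): it assumes the pubspec name is benchmark_di, and all metric values below are invented for illustration. The first output line is the header; list-valued fields such as timings_us are ';'-joined and quoted.

// Hypothetical usage example, not from the repository.
import 'package:benchmark_di/cli/report/csv_report.dart';

void main() {
  final rows = <Map<String, dynamic>>[
    {
      'benchmark': 'Universal_UniversalBenchmark.chainSingleton',
      'chainCount': 10,
      'nestingDepth': 5,
      'mean_us': 120.4,
      'median_us': 118.0,
      'stddev_us': 6.2,
      'min_us': 110,
      'max_us': 140,
      'trials': 30,
      'timings_us': [118, 121, 122],
      'memory_diff_kb': 256,
      'delta_peak_kb': 512,
      'peak_rss_kb': 40960,
    },
  ];
  // Prints the header row followed by one comma-separated line per result.
  print(CsvReport().render(rows));
}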
13  benchmark_di/lib/cli/report/json_report.dart  Normal file
@@ -0,0 +1,13 @@
import 'dart:convert';

import 'report_generator.dart';

/// Generates a JSON-formatted report for benchmark results.
class JsonReport extends ReportGenerator {
  /// No specific keys; outputs all fields of each raw result map.
  @override
  List<String> get keys => [];

  /// Renders all result rows as a pretty-printed JSON array.
  @override
  String render(List<Map<String, dynamic>> rows) {
    return const JsonEncoder.withIndent('  ').convert(rows);
  }
}
78  benchmark_di/lib/cli/report/markdown_report.dart  Normal file
@@ -0,0 +1,78 @@
import 'report_generator.dart';

/// Generates a Markdown-formatted report for benchmark results.
///
/// Displays result rows as a visually clear Markdown table including a legend for all metrics.
class MarkdownReport extends ReportGenerator {
  /// List of columns (keys) to show in the Markdown table.
  @override
  final List<String> keys = [
    'benchmark', 'chainCount', 'nestingDepth', 'mean_us', 'median_us', 'stddev_us',
    'min_us', 'max_us', 'trials', 'memory_diff_kb', 'delta_peak_kb', 'peak_rss_kb'
  ];

  /// Friendly display names for each benchmark type.
  static const nameMap = {
    'Universal_UniversalBenchmark.registerSingleton': 'RegisterSingleton',
    'Universal_UniversalBenchmark.chainSingleton': 'ChainSingleton',
    'Universal_UniversalBenchmark.chainFactory': 'ChainFactory',
    'Universal_UniversalBenchmark.chainAsync': 'AsyncChain',
    'Universal_UniversalBenchmark.named': 'Named',
    'Universal_UniversalBenchmark.override': 'Override',
  };

  /// Renders all results as a formatted Markdown table with aligned columns and a legend.
  @override
  String render(List<Map<String, dynamic>> rows) {
    final headers = [
      'Benchmark', 'Chain Count', 'Depth', 'Mean (us)', 'Median', 'Stddev',
      'Min', 'Max', 'N', 'ΔRSS(KB)', 'ΔPeak(KB)', 'PeakRSS(KB)'
    ];
    final dataRows = rows.map((r) {
      final readableName = nameMap[r['benchmark']] ?? r['benchmark'];
      return [
        readableName,
        r['chainCount'],
        r['nestingDepth'],
        r['mean_us'],
        r['median_us'],
        r['stddev_us'],
        r['min_us'],
        r['max_us'],
        r['trials'],
        r['memory_diff_kb'],
        r['delta_peak_kb'],
        r['peak_rss_kb'],
      ].map((cell) => cell.toString()).toList();
    }).toList();

    // Calculate column widths for pretty alignment.
    final all = [headers] + dataRows;
    final widths = List.generate(headers.length, (i) {
      return all.map((row) => row[i].length).reduce((a, b) => a > b ? a : b);
    });

    String rowToLine(List<String> row, {String sep = ' | '}) =>
        '| ${List.generate(row.length, (i) => row[i].padRight(widths[i])).join(sep)} |';

    final headerLine = rowToLine(headers);
    final divider = '| ${widths.map((w) => '-' * w).join(' | ')} |';
    final lines = dataRows.map(rowToLine).toList();

    final legend = '''
> **Legend:**
> `Benchmark` – Test name
> `Chain Count` – Number of independent chains
> `Depth` – Depth of each chain
> `Mean (us)` – Average time per run (microseconds)
> `Median` – Median time per run
> `Stddev` – Standard deviation
> `Min`, `Max` – Min/max run time
> `N` – Number of measurements
> `ΔRSS(KB)` – Change in process memory (KB)
> `ΔPeak(KB)` – Change in peak RSS (KB)
> `PeakRSS(KB)` – Max observed RSS memory (KB)
''';

    return '$legend\n\n${([headerLine, divider] + lines).join('\n')}';
  }
}
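As a rough sketch of the result (abridged to the first four columns, with an invented ChainSingleton row): the legend block comes first, then a table whose column widths are padded to the widest cell, roughly like

| Benchmark      | Chain Count | Depth | Mean (us) | …
| -------------- | ----------- | ----- | --------- | …
| ChainSingleton | 10          | 5     | 120.4     | …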
50  benchmark_di/lib/cli/report/pretty_report.dart  Normal file
@@ -0,0 +1,50 @@
import 'report_generator.dart';

/// Generates a human-readable, tab-delimited report for benchmark results.
///
/// Used for terminal and log output; shows each result as a single line with labeled headers.
class PrettyReport extends ReportGenerator {
  /// List of columns to output in the pretty report.
  @override
  final List<String> keys = [
    'benchmark', 'chainCount', 'nestingDepth', 'mean_us', 'median_us', 'stddev_us',
    'min_us', 'max_us', 'trials', 'memory_diff_kb', 'delta_peak_kb', 'peak_rss_kb'
  ];

  /// Mappings from internal benchmark IDs to display names.
  static const nameMap = {
    'Universal_UniversalBenchmark.registerSingleton': 'RegisterSingleton',
    'Universal_UniversalBenchmark.chainSingleton': 'ChainSingleton',
    'Universal_UniversalBenchmark.chainFactory': 'ChainFactory',
    'Universal_UniversalBenchmark.chainAsync': 'AsyncChain',
    'Universal_UniversalBenchmark.named': 'Named',
    'Universal_UniversalBenchmark.override': 'Override',
  };

  /// Renders the results as a header + tab-separated value table.
  @override
  String render(List<Map<String, dynamic>> rows) {
    final headers = [
      'Benchmark', 'Chain Count', 'Depth', 'Mean (us)', 'Median', 'Stddev',
      'Min', 'Max', 'N', 'ΔRSS(KB)', 'ΔPeak(KB)', 'PeakRSS(KB)'
    ];
    final header = headers.join('\t');
    final lines = rows.map((r) {
      final readableName = nameMap[r['benchmark']] ?? r['benchmark'];
      return [
        readableName,
        r['chainCount'],
        r['nestingDepth'],
        r['mean_us'],
        r['median_us'],
        r['stddev_us'],
        r['min_us'],
        r['max_us'],
        r['trials'],
        r['memory_diff_kb'],
        r['delta_peak_kb'],
        r['peak_rss_kb'],
      ].join('\t');
    }).toList();
    return ([header] + lines).join('\n');
  }
}
9  benchmark_di/lib/cli/report/report_generator.dart  Normal file
@@ -0,0 +1,9 @@
/// Abstract base for generating benchmark result reports in different formats.
///
/// Subclasses implement [render] to output results, and [keys] to define columns (if any).
abstract class ReportGenerator {
  /// Renders the given [results] as a formatted string (table, markdown, csv, etc.).
  String render(List<Map<String, dynamic>> results);

  /// List of output columns/keys included in the export (or [] for auto/all).
  List<String> get keys;
}
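A sketch of how a CLI might dispatch on an output-format flag; the chooseReport helper, the format names, and the package import paths are illustrative assumptions, not the actual wiring in benchmark_di.

// Hypothetical dispatch example, not from the repository.
import 'package:benchmark_di/cli/report/csv_report.dart';
import 'package:benchmark_di/cli/report/json_report.dart';
import 'package:benchmark_di/cli/report/markdown_report.dart';
import 'package:benchmark_di/cli/report/pretty_report.dart';
import 'package:benchmark_di/cli/report/report_generator.dart';

/// Picks a report generator for a given format string.
ReportGenerator chooseReport(String format) {
  switch (format) {
    case 'csv':
      return CsvReport();
    case 'json':
      return JsonReport();
    case 'md':
      return MarkdownReport();
    default:
      return PrettyReport();
  }
}

void main() {
  // Result rows would normally come from the benchmark runner.
  final rows = <Map<String, dynamic>>[];
  print(chooseReport('md').render(rows));
}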