mirror of https://github.com/pese-git/cherrypick.git (synced 2026-01-24 05:25:19 +00:00)
Add English documentation comments to all benchmark_cherrypick source files (adapters, scenarios, CLI, reporters, runner)
@@ -12,7 +12,13 @@ import 'package:benchmark_cherrypick/benchmarks/universal_chain_benchmark.dart';
import 'package:benchmark_cherrypick/benchmarks/universal_chain_async_benchmark.dart';
import 'package:benchmark_cherrypick/di_adapters/cherrypick_adapter.dart';

/// Command-line interface (CLI) runner for benchmarks.
///
/// Parses CLI arguments, orchestrates benchmarks for different
/// scenarios and configurations, collects results, and generates reports
/// in the desired output format.
class BenchmarkCliRunner {
  /// Runs benchmarks based on CLI [args], configuring different test scenarios.
  Future<void> run(List<String> args) async {
    final config = parseBenchmarkCli(args);
    final results = <Map<String, dynamic>>[];
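For orientation, a minimal entry point that wires this runner up might look like the sketch below; the `bin/` location and the import path of `BenchmarkCliRunner` are assumptions, not taken from this diff.

```dart
// Hypothetical bin/benchmark.dart entry point; the import path is an assumption.
import 'package:benchmark_cherrypick/benchmark_cli_runner.dart';

Future<void> main(List<String> args) async {
  // Argument parsing, benchmark execution, and report generation are all
  // delegated to the runner documented in the diff above.
  await BenchmarkCliRunner().run(args);
}
```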
@@ -3,15 +3,23 @@ import 'dart:io';
import 'package:args/args.dart';
import 'package:benchmark_cherrypick/scenarios/universal_chain_module.dart';

/// Enum describing all supported Universal DI benchmark types.
enum UniversalBenchmark {
  /// Simple singleton registration benchmark
  registerSingleton,
  /// Chain of singleton dependencies
  chainSingleton,
  /// Chain using factories
  chainFactory,
  /// Async chain resolution
  chainAsync,
  /// Named registration benchmark
  named,
  /// Override/child-scope benchmark
  override,
}
/// Maps [UniversalBenchmark] to the scenario enum for DI chains.
UniversalScenario toScenario(UniversalBenchmark b) {
  switch (b) {
    case UniversalBenchmark.registerSingleton:
@@ -29,6 +37,7 @@ UniversalScenario toScenario(UniversalBenchmark b) {
  }
}

/// Maps a benchmark to its registration mode (singleton/factory/async).
UniversalBindingMode toMode(UniversalBenchmark b) {
  switch (b) {
    case UniversalBenchmark.registerSingleton:
@@ -46,6 +55,7 @@ UniversalBindingMode toMode(UniversalBenchmark b) {
  }
}

/// Utility to parse a string into its corresponding enum value of type [T].
T parseEnum<T>(String value, List<T> values, T defaultValue) {
  return values.firstWhere(
    (v) => v.toString().split('.').last.toLowerCase() == value.toLowerCase(),
@@ -53,15 +63,23 @@ T parseEnum<T>(String value, List<T> values, T defaultValue) {
  );
}

/// Parses a comma-separated list of positive integers from [s].
List<int> parseIntList(String s) =>
    s.split(',').map((e) => int.tryParse(e.trim()) ?? 0).where((x) => x > 0).toList();
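As a quick illustration of these two helpers, here is a sketch assuming both functions above are in scope; the literals are only illustrative.

```dart
// Resolve a CLI string to an enum member (case-insensitive on the member name),
// falling back to a default when nothing matches.
final bench = parseEnum<UniversalBenchmark>(
  'chainasync',
  UniversalBenchmark.values,
  UniversalBenchmark.chainSingleton,
);

// Parse a size list; non-numeric or non-positive entries are dropped.
final counts = parseIntList('10, 100, x, 1000'); // -> [10, 100, 1000]
```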
/// CLI config describing what and how to benchmark.
class BenchmarkCliConfig {
  /// Benchmarks (scenarios) selected to run.
  final List<UniversalBenchmark> benchesToRun;
  /// Chain counts to test (number of parallel chains per run).
  final List<int> chainCounts;
  /// Nesting depths to test (maximum chain length per run).
  final List<int> nestDepths;
  /// How many times to repeat each trial.
  final int repeats;
  /// How many warm-up runs to perform before measuring.
  final int warmups;
  /// Output report format.
  final String format;

  BenchmarkCliConfig({
    required this.benchesToRun,
@@ -73,6 +91,8 @@ class BenchmarkCliConfig {
  });
}

/// Parses CLI arguments [args] into a [BenchmarkCliConfig].
/// Supports --benchmark, --chainCount, --nestingDepth, etc.
BenchmarkCliConfig parseBenchmarkCli(List<String> args) {
  final parser = ArgParser()
    ..addOption('benchmark', abbr: 'b', defaultsTo: 'chainSingleton')
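A sketch of how the flags named above map onto the resulting config; only --benchmark, --chainCount, and --nestingDepth are taken from the doc comment, and the remaining options are cut off in this hunk.

```dart
// Assumes parseBenchmarkCli (above) is in scope; flag values are illustrative.
final config = parseBenchmarkCli([
  '--benchmark', 'chainSingleton',
  '--chainCount', '10,100,1000',
  '--nestingDepth', '5,10',
]);
print(config.benchesToRun); // e.g. [UniversalBenchmark.chainSingleton]
print(config.chainCounts);  // [10, 100, 1000]
print(config.nestDepths);   // [5, 10]
```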
@@ -1,11 +1,14 @@
import 'report_generator.dart';

/// Generates a CSV-formatted report for benchmark results.
class CsvReport extends ReportGenerator {
  /// List of all keys/columns to include in the CSV output.
  @override
  final List<String> keys = [
    'benchmark','chainCount','nestingDepth','mean_us','median_us','stddev_us',
    'min_us','max_us','trials','timings_us','memory_diff_kb','delta_peak_kb','peak_rss_kb'
  ];

  /// Renders rows as a CSV table string.
  @override
  String render(List<Map<String, dynamic>> rows) {
    final header = keys.join(',');
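For context, a sketch of feeding one made-up result row through this reporter; it assumes CsvReport is in scope, and all field values, including the encoding of timings_us, are illustrative assumptions.

```dart
final row = <String, dynamic>{
  'benchmark': 'Universal_UniversalBenchmark.chainSingleton',
  'chainCount': 10,
  'nestingDepth': 5,
  'mean_us': 123.4,
  'median_us': 120.0,
  'stddev_us': 8.2,
  'min_us': 110,
  'max_us': 150,
  'trials': 20,
  'timings_us': '110;120;150', // how the per-run list is serialized is an assumption
  'memory_diff_kb': 64,
  'delta_peak_kb': 128,
  'peak_rss_kb': 20480,
};
// Produces the header line built from `keys`, followed by one data row.
print(CsvReport().render([row]));
```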
@@ -1,8 +1,11 @@
import 'report_generator.dart';

/// Generates a JSON-formatted report for benchmark results.
class JsonReport extends ReportGenerator {
  /// No specific keys; outputs all fields of the raw map.
  @override
  List<String> get keys => [];

  /// Renders all result rows as a pretty-printed JSON array.
  @override
  String render(List<Map<String, dynamic>> rows) {
    return '[\n${rows.map((r) => ' $r').join(',\n')}\n]';
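Worth noting: interpolating a Dart map via `$r` relies on `Map.toString()`, which is JSON-like but not strictly valid JSON (keys come out unquoted). If strict JSON were ever needed, a variant could lean on dart:convert, as in this sketch; it is an alternative, not the committed implementation.

```dart
import 'dart:convert';

// Sketch: strict, pretty-printed JSON encoding of the result rows.
String renderStrictJson(List<Map<String, dynamic>> rows) =>
    const JsonEncoder.withIndent('  ').convert(rows);
```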
@@ -1,11 +1,17 @@
import 'report_generator.dart';

/// Generates a Markdown-formatted report for benchmark results.
///
/// Displays result rows as a visually clear Markdown table, including a legend for all metrics.
class MarkdownReport extends ReportGenerator {
  /// List of columns (keys) to show in the Markdown table.
  @override
  final List<String> keys = [
    'benchmark','chainCount','nestingDepth','mean_us','median_us','stddev_us',
    'min_us','max_us','trials','memory_diff_kb','delta_peak_kb','peak_rss_kb'
  ];

  /// Friendly display names for each benchmark type.
  static const nameMap = {
    'Universal_UniversalBenchmark.registerSingleton':'RegisterSingleton',
    'Universal_UniversalBenchmark.chainSingleton':'ChainSingleton',
@@ -15,6 +21,7 @@ class MarkdownReport extends ReportGenerator {
    'Universal_UniversalBenchmark.override':'Override',
  };

  /// Renders all results as a formatted Markdown table with aligned columns and a legend.
  @override
  String render(List<Map<String, dynamic>> rows) {
    final headers = [
@@ -38,7 +45,7 @@ class MarkdownReport extends ReportGenerator {
      ].map((cell) => cell.toString()).toList();
    }).toList();

    // Calculate column width for pretty alignment
    final all = [headers] + dataRows;
    final widths = List.generate(headers.length, (i) {
      return all.map((row) => row[i].length).reduce((a, b) => a > b ? a : b);
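To make the alignment step concrete, here is a small sketch of padding each cell before emitting a Markdown table row. It assumes the headers/dataRows/widths variables computed above; the committed code's exact join logic is not visible in this hunk.

```dart
// Pads each cell to its column width and joins the row in Markdown pipe syntax.
String formatRow(List<String> row, List<int> widths) {
  final cells = [
    for (var i = 0; i < row.length; i++) row[i].padRight(widths[i]),
  ];
  return '| ${cells.join(' | ')} |';
}
```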
@@ -1,12 +1,17 @@
import 'report_generator.dart';

/// Generates a human-readable, tab-delimited report for benchmark results.
///
/// Used for terminal and log output; shows each result as a single line with labeled headers.
class PrettyReport extends ReportGenerator {
  /// List of columns to output in the pretty report.
  @override
  final List<String> keys = [
    'benchmark','chainCount','nestingDepth','mean_us','median_us','stddev_us',
    'min_us','max_us','trials','memory_diff_kb','delta_peak_kb','peak_rss_kb'
  ];

  /// Mappings from internal benchmark IDs to display names.
  static const nameMap = {
    'Universal_UniversalBenchmark.registerSingleton': 'RegisterSingleton',
    'Universal_UniversalBenchmark.chainSingleton': 'ChainSingleton',
@@ -16,6 +21,7 @@ class PrettyReport extends ReportGenerator {
    'Universal_UniversalBenchmark.override': 'Override',
  };

  /// Renders the results as a header + tab-separated value table.
  @override
  String render(List<Map<String, dynamic>> rows) {
    final headers = [
@@ -1,4 +1,9 @@
/// Abstract base for generating benchmark result reports in different formats.
///
/// Subclasses implement [render] to output results, and [keys] to define columns (if any).
abstract class ReportGenerator {
  /// Renders the given [results] as a formatted string (table, markdown, csv, etc).
  String render(List<Map<String, dynamic>> results);

  /// List of output columns/keys included in the export (or [] for auto/all).
  List<String> get keys;
}
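Tying the reporters back to the config's `format` field, a hypothetical selector might look like this; the function name and the accepted format strings are assumptions, since the diff does not show how the CLI runner picks a reporter.

```dart
// Assumes CsvReport, JsonReport, MarkdownReport and PrettyReport are imported.
ReportGenerator selectReport(String format) {
  switch (format) {
    case 'csv':
      return CsvReport();
    case 'json':
      return JsonReport();
    case 'markdown':
      return MarkdownReport();
    default:
      return PrettyReport(); // assumed fallback for terminal output
  }
}
```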
@@ -3,10 +3,15 @@ import 'dart:math';
import 'package:benchmark_cherrypick/benchmarks/universal_chain_benchmark.dart';
import 'package:benchmark_cherrypick/benchmarks/universal_chain_async_benchmark.dart';

/// Holds the results for a single benchmark execution.
class BenchmarkResult {
  /// List of timings for each run (in microseconds).
  final List<num> timings;
  /// Difference in memory (RSS, in KB) after running.
  final int memoryDiffKb;
  /// Difference between peak RSS and initial RSS (in KB).
  final int deltaPeakKb;
  /// Peak RSS memory observed (in KB).
  final int peakRssKb;

  BenchmarkResult({
    required this.timings,
@@ -14,6 +19,7 @@ class BenchmarkResult {
    required this.deltaPeakKb,
    required this.peakRssKb,
  });

  /// Computes a BenchmarkResult instance from run timings and memory data.
  factory BenchmarkResult.collect({
    required List<num> timings,
    required List<int> rssValues,
@@ -32,7 +38,10 @@ class BenchmarkResult {
  }
}

/// Static methods to execute and time benchmarks for DI containers.
class BenchmarkRunner {
  /// Runs a synchronous benchmark ([UniversalChainBenchmark]) for a given number of [warmups] and [repeats].
  /// Collects execution time and observed memory.
  static Future<BenchmarkResult> runSync({
    required UniversalChainBenchmark benchmark,
    required int warmups,
@@ -58,6 +67,8 @@ class BenchmarkRunner {
    return BenchmarkResult.collect(timings: timings, rssValues: rssValues, memBefore: memBefore);
  }

  /// Runs an asynchronous benchmark ([UniversalChainAsyncBenchmark]) for a given number of [warmups] and [repeats].
  /// Collects execution time and observed memory.
  static Future<BenchmarkResult> runAsync({
    required UniversalChainAsyncBenchmark benchmark,
    required int warmups,
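Since the reporters above consume mean_us, median_us, and stddev_us, here is a minimal sketch of deriving those aggregates from `BenchmarkResult.timings`. The helper name and exact formulas are hypothetical; the committed aggregation code sits behind the truncated hunks above.

```dart
import 'dart:math';

// Hypothetical aggregation helper over per-run timings in microseconds.
Map<String, num> summarize(List<num> timingsUs) {
  final sorted = [...timingsUs]..sort();
  final mean = sorted.reduce((a, b) => a + b) / sorted.length;
  final median = sorted.length.isOdd
      ? sorted[sorted.length ~/ 2]
      : (sorted[sorted.length ~/ 2 - 1] + sorted[sorted.length ~/ 2]) / 2;
  final variance =
      sorted.map((t) => pow(t - mean, 2)).reduce((a, b) => a + b) / sorted.length;
  return {
    'mean_us': mean,
    'median_us': median,
    'stddev_us': sqrt(variance),
    'min_us': sorted.first,
    'max_us': sorted.last,
    'trials': sorted.length,
  };
}
```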