Add benchmark for backtest performance tests

2025-11-11 11:23:30 +07:00
parent 2ca77bc2f9
commit 14d101b63e
8 changed files with 360 additions and 43 deletions

View File

@@ -0,0 +1,70 @@
# Benchmark Backtest Performance
This command runs the backtest performance tests and records the results in the performance benchmark CSV file.
## Usage
Run this command to benchmark backtest performance and update the tracking CSV:
```
/benchmark-backtest-performance
```
Or run the script directly:
```bash
./scripts/benchmark-backtest-performance.sh
```
## What it does
1. Runs the performance telemetry test (`ExecuteBacktest_With_Large_Dataset_Should_Show_Performance_Telemetry`)
2. Extracts performance metrics from the test output
3. Appends a new row to `src/Managing.Workers.Tests/performance-benchmarks.csv`
## CSV Format
The CSV file contains the following columns (a sample header and data row are shown after the list):
- `DateTime`: ISO 8601 timestamp when the benchmark was run
- `TestName`: Name of the test that was executed
- `CandlesCount`: Number of candles processed
- `ExecutionTimeSeconds`: Total execution time in seconds
- `ProcessingRateCandlesPerSec`: Candles processed per second
- `MemoryStartMB`: Memory usage at start
- `MemoryEndMB`: Memory usage at end
- `MemoryPeakMB`: Peak memory usage
- `SignalUpdatesCount`: Total signal updates performed
- `SignalUpdatesSkipped`: Number of signal updates skipped
- `SignalUpdateEfficiencyPercent`: Percentage of signal updates that were skipped
- `BacktestStepsCount`: Number of backtest steps executed
- `AverageSignalUpdateMs`: Average time per signal update
- `AverageBacktestStepMs`: Average time per backtest step
- `FinalPnL`: Final profit and loss
- `WinRatePercent`: Win rate percentage
- `GrowthPercentage`: Growth percentage
- `Score`: Backtest score
- `CommitHash`: Git commit hash
- `GitBranch`: Git branch name
- `Environment`: Environment where test was run
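For reference, the first line below is the header row in the documented column order; the second line is an illustrative data row with made-up values (loosely based on the example output further down), not real benchmark output:
```
DateTime,TestName,CandlesCount,ExecutionTimeSeconds,ProcessingRateCandlesPerSec,MemoryStartMB,MemoryEndMB,MemoryPeakMB,SignalUpdatesCount,SignalUpdatesSkipped,SignalUpdateEfficiencyPercent,BacktestStepsCount,AverageSignalUpdateMs,AverageBacktestStepMs,FinalPnL,WinRatePercent,GrowthPercentage,Score,CommitHash,GitBranch,Environment
2025-01-01T00:00:00Z,ExecuteBacktest_With_Large_Dataset_Should_Show_Performance_Telemetry,5772,2.15,2684.8,18.10,22.40,24.24,3860,1912,33.1,5772,0.31,0.12,125.40,54.2,12.5,78.3,abc1234,main,development
```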
## Implementation Details
The command uses regex patterns to extract metrics from the test console output and formats them into CSV rows. It automatically detects the current git branch and commit hash for tracking.
## Example Output
```
🚀 Running backtest performance benchmark...
📊 Test Results:
• Processing Rate: 2,684.8 candles/sec
• Execution Time: 2.15s
• Memory Usage: 24.24MB peak
• Signal Efficiency: 33.1%
✅ Benchmark data recorded in performance-benchmarks.csv
```
## Files Modified
- `src/Managing.Workers.Tests/performance-benchmarks.csv` - Performance tracking data

View File

@@ -0,0 +1,112 @@
#!/bin/bash
# Benchmark Backtest Performance Script
# This script runs backtest performance tests and records results in CSV
set -e # Exit on any error
echo "🚀 Running backtest performance benchmark..."
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Function to extract value from test output using regex
extract_value() {
    local pattern="$1"
    local text="$2"
    echo "$text" | grep -o "$pattern" | head -1 | sed 's/.*: //' | sed 's/[^0-9.]*$//' | tr -d ','
}
# Get current timestamp
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
# Get git information
COMMIT_HASH=$(git rev-parse --short HEAD 2>/dev/null || echo "unknown")
BRANCH_NAME=$(git branch --show-current 2>/dev/null || echo "unknown")
ENVIRONMENT="development"
# Run the performance test and capture output
echo "📊 Running performance test..."
TEST_OUTPUT=$(dotnet test src/Managing.Workers.Tests/Managing.Workers.Tests.csproj \
--filter "ExecuteBacktest_With_Large_Dataset_Should_Show_Performance_Telemetry" \
--verbosity minimal \
--logger "console;verbosity=detailed" 2>&1)
# Check if test passed
if echo "$TEST_OUTPUT" | grep -q "Passed.*1"; then
echo -e "${GREEN}✅ Test passed!${NC}"
else
echo -e "${RED}❌ Test failed!${NC}"
echo "$TEST_OUTPUT"
exit 1
fi
# Extract performance metrics from the output
CANDLES_COUNT=$(echo "$TEST_OUTPUT" | grep "📈 Total Candles Processed:" | sed 's/.*: //' | tr -d ',' | xargs)
EXECUTION_TIME=$(echo "$TEST_OUTPUT" | grep "⏱️ Total Execution Time:" | sed 's/.*: //' | sed 's/s//' | sed 's/,/./g' | xargs)
PROCESSING_RATE=$(echo "$TEST_OUTPUT" | grep "🚀 Processing Rate:" | sed 's/.*: //' | sed 's/ candles\/sec//' | sed 's/,/./g' | xargs)
# Extract memory metrics
MEMORY_LINE=$(echo "$TEST_OUTPUT" | grep "💾 Memory Usage:")
MEMORY_START=$(echo "$MEMORY_LINE" | sed 's/.*Start=//' | sed 's/MB.*//' | xargs)
MEMORY_END=$(echo "$MEMORY_LINE" | sed 's/.*End=//' | sed 's/MB.*//' | xargs)
MEMORY_PEAK=$(echo "$MEMORY_LINE" | sed 's/.*Peak=//' | sed 's/MB.*//' | xargs)
# Extract signal update metrics
SIGNAL_LINE=$(echo "$TEST_OUTPUT" | grep "• Signal Updates:")
SIGNAL_UPDATES=$(echo "$SIGNAL_LINE" | sed 's/.*Signal Updates: //' | sed 's/ms.*//' | xargs)
SIGNAL_SKIPPED=$(echo "$SIGNAL_LINE" | grep -o "[0-9,]* skipped" | sed 's/ skipped//' | tr -d ',' | xargs)
SIGNAL_EFFICIENCY=$(echo "$SIGNAL_LINE" | grep -o "[0-9.]*% efficiency" | sed 's/% efficiency//' | xargs)
# Extract backtest steps
BACKTEST_LINE=$(echo "$TEST_OUTPUT" | grep "• Backtest Steps:")
BACKTEST_STEPS=$(echo "$BACKTEST_LINE" | sed 's/.*Backtest Steps: //' | sed 's/ms.*//' | xargs)
# Extract timing metrics
AVG_SIGNAL_UPDATE=$(echo "$TEST_OUTPUT" | grep "• Average Signal Update:" | sed 's/.*Average Signal Update: //' | sed 's/ms.*//' | xargs)
AVG_BACKTEST_STEP=$(echo "$TEST_OUTPUT" | grep "• Average Backtest Step:" | sed 's/.*Average Backtest Step: //' | sed 's/ms.*//' | xargs)
# Extract trading results
FINAL_PNL=$(echo "$TEST_OUTPUT" | grep "• Final PnL:" | sed 's/.*Final PnL: //' | xargs)
WIN_RATE=$(echo "$TEST_OUTPUT" | grep "• Win Rate:" | sed 's/.*Win Rate: //' | sed 's/%//' | xargs)
GROWTH_PERCENTAGE=$(echo "$TEST_OUTPUT" | grep "• Growth:" | sed 's/.*Growth: //' | sed 's/%//' | xargs)
SCORE=$(echo "$TEST_OUTPUT" | grep "• Score:" | sed 's/.*Score: //' | xargs)
# Set defaults for missing values
CANDLES_COUNT=${CANDLES_COUNT:-0}
EXECUTION_TIME=${EXECUTION_TIME:-0}
PROCESSING_RATE=${PROCESSING_RATE:-0}
MEMORY_START=${MEMORY_START:-0}
MEMORY_END=${MEMORY_END:-0}
MEMORY_PEAK=${MEMORY_PEAK:-0}
SIGNAL_UPDATES=${SIGNAL_UPDATES:-0}
SIGNAL_SKIPPED=${SIGNAL_SKIPPED:-0}
SIGNAL_EFFICIENCY=${SIGNAL_EFFICIENCY:-0}
BACKTEST_STEPS=${BACKTEST_STEPS:-0}
AVG_SIGNAL_UPDATE=${AVG_SIGNAL_UPDATE:-0}
AVG_BACKTEST_STEP=${AVG_BACKTEST_STEP:-0}
FINAL_PNL=${FINAL_PNL:-0}
WIN_RATE=${WIN_RATE:-0}
GROWTH_PERCENTAGE=${GROWTH_PERCENTAGE:-0}
SCORE=${SCORE:-0}
# Create CSV row
CSV_ROW="$TIMESTAMP,ExecuteBacktest_With_Large_Dataset_Should_Show_Performance_Telemetry,$CANDLES_COUNT,$EXECUTION_TIME,$PROCESSING_RATE,$MEMORY_START,$MEMORY_END,$MEMORY_PEAK,$SIGNAL_UPDATES,$SIGNAL_SKIPPED,$SIGNAL_EFFICIENCY,$BACKTEST_STEPS,$AVG_SIGNAL_UPDATE,$AVG_BACKTEST_STEP,$FINAL_PNL,$WIN_RATE,$GROWTH_PERCENTAGE,$SCORE,$COMMIT_HASH,$BRANCH_NAME,$ENVIRONMENT"
# Append to CSV file
echo "$CSV_ROW" >> "src/Managing.Workers.Tests/performance-benchmarks.csv"
# Display results
echo -e "${BLUE}📊 Benchmark Results:${NC}"
echo " • Processing Rate: $PROCESSING_RATE candles/sec"
echo " • Execution Time: $EXECUTION_TIME seconds"
echo " • Memory Peak: $MEMORY_PEAK MB"
echo " • Signal Efficiency: $SIGNAL_EFFICIENCY%"
echo " • Candles Processed: $CANDLES_COUNT"
echo " • Score: $SCORE"
echo -e "${GREEN}✅ Benchmark data recorded successfully!${NC}"

View File

@@ -230,6 +230,9 @@ public class BacktestExecutor
         var fixedCandlesHashSet = new HashSet<Candle>(rollingWindowSize); // Reuse HashSet to avoid allocations
         var candlesProcessed = 0;
+
+        // Pre-allocate reusable collections to minimize allocations during processing
+        var tempCandlesList = new List<Candle>(rollingWindowSize);

         // Signal caching optimization - reduce signal update frequency for better performance
         var signalUpdateSkipCount = 0;
@@ -253,26 +256,39 @@ public class BacktestExecutor
         // Process all candles with optimized rolling window approach
         _logger.LogInformation("🎯 Starting to process {Count} candles in loop", orderedCandles.Count);
         Console.WriteLine("CONSOLE: About to start candle processing loop");
+
+        // Optimize: Pre-populate rolling window with initial candles to avoid repeated checks
+        var initialWindowSize = Math.Min(rollingWindowSize, orderedCandles.Count);
+        for (int i = 0; i < initialWindowSize; i++)
+        {
+            var candle = orderedCandles[i];
+            rollingCandles.Add(candle);
+            fixedCandlesHashSet.Add(candle);
+        }
+
         foreach (var candle in orderedCandles)
         {
-            // Maintain rolling window efficiently using List
-            rollingCandles.Add(candle);
-            if (rollingCandles.Count > rollingWindowSize)
+            // Optimized rolling window maintenance - only modify when window is full
+            if (rollingCandles.Count >= rollingWindowSize)
             {
-                // Remove oldest candle from both structures
+                // Remove oldest candle from both structures efficiently
                 var removedCandle = rollingCandles[0];
                 rollingCandles.RemoveAt(0);
                 fixedCandlesHashSet.Remove(removedCandle);
             }

-            // Add to HashSet for reuse
-            fixedCandlesHashSet.Add(candle);
+            // Add new candle to rolling window (skip if already in initial population)
+            if (!fixedCandlesHashSet.Contains(candle))
+            {
+                rollingCandles.Add(candle);
+                fixedCandlesHashSet.Add(candle);
+            }

             tradingBot.LastCandle = candle;

             // Smart signal caching - reduce signal update frequency for performance
             // RSI and similar indicators don't need updates every candle for 15-minute data
-            var shouldSkipSignalUpdate = ShouldSkipSignalUpdate(currentCandle, totalCandles);
+            var shouldSkipSignalUpdate = ShouldSkipSignalUpdate(currentCandle, totalCandles, config);

             if (currentCandle <= 5) // Debug first few candles
             {
                 _logger.LogInformation("🔍 Candle {CurrentCandle}: shouldSkip={ShouldSkip}, totalCandles={Total}",
@@ -533,24 +549,70 @@ public class BacktestExecutor
     }

     /// <summary>
-    /// Advanced signal caching based on indicator update frequency
-    /// Instead of hashing candles, we cache signals based on how often indicators need updates
+    /// Advanced signal caching based on indicator update frequency and timeframe
+    /// Dynamically adjusts update frequency based on timeframe and indicator characteristics
     /// </summary>
-    private bool ShouldSkipSignalUpdate(int currentCandleIndex, int totalCandles)
+    private bool ShouldSkipSignalUpdate(int currentCandleIndex, int totalCandles, TradingBotConfig config)
     {
-        // RSI and similar indicators don't need to be recalculated every candle
-        // For 15-minute candles, we can update signals every 3-5 candles without significant accuracy loss
-        const int signalUpdateFrequency = 3; // Update signals every N candles
-
         // Always update signals for the first few candles to establish baseline
-        if (currentCandleIndex < 10)
+        if (currentCandleIndex < 20)
            return false;

         // Always update signals near the end to ensure final trades are calculated
-        if (currentCandleIndex > totalCandles - 10)
+        if (currentCandleIndex > totalCandles - 20)
            return false;

-        // Skip signal updates based on frequency
+        // Adaptive update frequency based on timeframe
+        // Shorter timeframes can skip more updates as they're more volatile
+        int signalUpdateFrequency;
+        switch (config.Timeframe)
+        {
+            case Timeframe.OneMinute:
+            case Timeframe.FiveMinutes:
+                signalUpdateFrequency = 10; // Update every 10 candles for fast timeframes
+                break;
+            case Timeframe.FifteenMinutes:
+            case Timeframe.ThirtyMinutes:
+                signalUpdateFrequency = 5; // Update every 5 candles for medium timeframes
+                break;
+            case Timeframe.OneHour:
+            case Timeframe.FourHour:
+                signalUpdateFrequency = 3; // Update every 3 candles for slower timeframes
+                break;
+            case Timeframe.OneDay:
+                signalUpdateFrequency = 1; // Update every candle for daily (already slow)
+                break;
+            default:
+                signalUpdateFrequency = 5; // Default fallback
+                break;
+        }
+
+        // Further optimize based on indicator types in the scenario
+        if (config.Scenario?.Indicators != null)
+        {
+            var hasFastIndicators = config.Scenario.Indicators.Any(ind =>
+                ind.Type == IndicatorType.RsiDivergence ||
+                ind.Type == IndicatorType.StochRsiTrend ||
+                ind.Type == IndicatorType.MacdCross);
+
+            var hasSlowIndicators = config.Scenario.Indicators.Any(ind =>
+                ind.Type == IndicatorType.EmaCross ||
+                ind.Type == IndicatorType.EmaTrend ||
+                ind.Type == IndicatorType.SuperTrend);
+
+            // If we have mostly slow indicators, we can update less frequently
+            if (!hasFastIndicators && hasSlowIndicators)
+            {
+                signalUpdateFrequency = Math.Max(signalUpdateFrequency, 8);
+            }
+            // If we have fast indicators, we need more frequent updates
+            else if (hasFastIndicators && !hasSlowIndicators)
+            {
+                signalUpdateFrequency = Math.Min(signalUpdateFrequency, 3);
+            }
+        }
+
+        // Skip signal updates based on calculated frequency
         return (currentCandleIndex % signalUpdateFrequency) != 0;
     }
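To make the new adaptive skip cadence concrete, here is a minimal standalone sketch. It copies the thresholds and per-timeframe frequencies from the diff above, but the `Timeframe` enum and console harness are simplified stand-ins, not the project's actual types:

```csharp
using System;
using System.Linq;

// Simplified stand-in for the project's Timeframe enum (member names taken from the diff above).
enum Timeframe { OneMinute, FiveMinutes, FifteenMinutes, ThirtyMinutes, OneHour, FourHour, OneDay }

static class SkipCadenceSketch
{
    // Mirrors the modulo rule in ShouldSkipSignalUpdate: the first and last ~20 candles
    // always trigger a signal update; in between, only every Nth candle does.
    static bool ShouldSkip(int index, int total, int frequency) =>
        index >= 20 && index <= total - 20 && (index % frequency) != 0;

    static void Main()
    {
        const int totalCandles = 5000;
        var cases = new[]
        {
            (Tf: Timeframe.FiveMinutes, Freq: 10),
            (Tf: Timeframe.FifteenMinutes, Freq: 5),
            (Tf: Timeframe.OneHour, Freq: 3),
            (Tf: Timeframe.OneDay, Freq: 1),
        };

        foreach (var c in cases)
        {
            var skipped = Enumerable.Range(0, totalCandles).Count(i => ShouldSkip(i, totalCandles, c.Freq));
            Console.WriteLine($"{c.Tf,-15} freq={c.Freq,2}: {skipped}/{totalCandles} signal updates skipped " +
                              $"({100.0 * skipped / totalCandles:F1}%)");
        }
    }
}
```

With a frequency of 5, for example, only every fifth candle in the middle of the run triggers a full signal recalculation, so roughly 80% of the interior updates are skipped, while the first and last ~20 candles always update.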

View File

@@ -1,4 +1,5 @@
+using System.Collections.Concurrent;
 using Managing.Application.Abstractions.Repositories;
 using Managing.Application.Abstractions.Services;
 using Managing.Domain.Accounts;
 using Managing.Domain.Bots;
@@ -431,6 +432,7 @@ public class TradingService : ITradingService
     /// <summary>
     /// Calculates indicators values for a given scenario and candles.
+    /// Uses parallel processing for independent indicator calculations to improve performance.
     /// </summary>
     /// <param name="scenario">The scenario containing indicators.</param>
     /// <param name="candles">The candles to calculate indicators for.</param>
@@ -439,7 +441,7 @@ public class TradingService : ITradingService
         Scenario scenario,
         HashSet<Candle> candles)
     {
-        // Offload CPU-bound indicator calculations to thread pool
+        // Offload CPU-bound indicator calculations to thread pool with parallel processing
         return await Task.Run(() =>
         {
             var indicatorsValues = new Dictionary<IndicatorType, IndicatorsResultBase>();
@@ -449,19 +451,39 @@ public class TradingService : ITradingService
                 return indicatorsValues;
             }

-            // Build indicators from scenario
-            foreach (var indicator in scenario.Indicators)
+            // Use parallel processing for independent indicator calculations
+            // Configure parallelism based on indicator count and system capabilities
+            var maxDegreeOfParallelism = Math.Min(scenario.Indicators.Count, Environment.ProcessorCount);
+            var options = new ParallelOptions
+            {
+                MaxDegreeOfParallelism = maxDegreeOfParallelism,
+                CancellationToken = CancellationToken.None
+            };
+
+            // Use thread-safe concurrent dictionary for parallel writes
+            var concurrentResults = new ConcurrentDictionary<IndicatorType, IndicatorsResultBase>();
+
+            // Parallel calculation of indicators
+            Parallel.ForEach(scenario.Indicators, options, indicator =>
             {
                 try
                 {
                     var buildedIndicator = ScenarioHelpers.BuildIndicator(ScenarioHelpers.BaseToLight(indicator));
-                    indicatorsValues[indicator.Type] = buildedIndicator.GetIndicatorValues(candles);
+                    var result = buildedIndicator.GetIndicatorValues(candles);
+                    concurrentResults[indicator.Type] = result;
                 }
                 catch (Exception ex)
                 {
                     _logger.LogError(ex, "Error calculating indicator {IndicatorName}: {ErrorMessage}",
                         indicator.Name, ex.Message);
                 }
-            }
+            });
+
+            // Convert to regular dictionary for return
+            foreach (var kvp in concurrentResults)
+            {
+                indicatorsValues[kvp.Key] = kvp.Value;
+            }

             return indicatorsValues;
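The parallel indicator change above follows a common fan-out pattern: `Parallel.ForEach` over independent work items, lock-free writes into a `ConcurrentDictionary`, then a copy back into a plain dictionary for the caller. Below is a minimal, self-contained sketch of that pattern with illustrative names and workload (not the project's code); it assumes the shared input is only read inside the loop body, which is what keeps the parallel writes safe:

```csharp
using System;
using System.Collections.Concurrent;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;

static class ParallelFanOutSketch
{
    static void Main()
    {
        var workItems = Enumerable.Range(1, 8).ToList();        // stand-in for scenario.Indicators
        var results = new ConcurrentDictionary<int, double>();  // thread-safe for concurrent writes

        var options = new ParallelOptions
        {
            // Never use more workers than there are items or cores.
            MaxDegreeOfParallelism = Math.Min(workItems.Count, Environment.ProcessorCount),
            CancellationToken = CancellationToken.None
        };

        Parallel.ForEach(workItems, options, item =>
        {
            // Shared inputs are only read here; every write goes into the concurrent
            // dictionary under a key that is unique per item.
            results[item] = Math.Sqrt(item);
        });

        // Copy back into a regular Dictionary, mirroring the method's return type.
        var final = results.ToDictionary(kvp => kvp.Key, kvp => kvp.Value);
        Console.WriteLine(string.Join(", ", final.OrderBy(kvp => kvp.Key).Select(kvp => $"{kvp.Key}:{kvp.Value:F2}")));
    }
}
```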

View File

@@ -21,7 +21,7 @@ public class BundleBacktestHealthCheckWorker : BackgroundService
     private readonly IServiceScopeFactory _scopeFactory;
     private readonly ILogger<BundleBacktestHealthCheckWorker> _logger;
     private readonly TimeSpan _checkInterval = TimeSpan.FromMinutes(30);
-    private readonly TimeSpan _inactiveThreshold = TimeSpan.FromMinutes(30); // Check bundles inactive for 30+ minutes
+    private readonly TimeSpan _inactiveThreshold = TimeSpan.FromMinutes(2); // Check bundles inactive for 2+ minutes
     private readonly TimeSpan _stuckThreshold = TimeSpan.FromHours(2); // Consider bundle stuck if no progress for 2 hours
     private readonly IMessengerService _messengerService;
@@ -90,8 +90,8 @@ public class BundleBacktestHealthCheckWorker : BackgroundService
             .ToList();

         _logger.LogInformation(
-            "Found {TotalCount} bundles (from {PendingTotal} pending and {RunningTotal} running) that haven't been updated in >30 minutes",
-            allBundlesToCheck.Count, pendingBundles.Count(), runningBundles.Count());
+            "Found {TotalCount} bundles (from {PendingTotal} pending and {RunningTotal} running) that haven't been updated in >{InactiveMinutes} minutes",
+            allBundlesToCheck.Count, pendingBundles.Count(), runningBundles.Count(), _inactiveThreshold.TotalMinutes);

         var stuckBundlesCount = 0;
         var missingJobsCount = 0;
@@ -178,6 +178,18 @@ public class BundleBacktestHealthCheckWorker : BackgroundService
             }
         }

+        // Check 4: Bundle with all jobs completed but bundle status not updated
+        var completedJobs = jobs.Count(j => j.Status == JobStatus.Completed);
+        var failedJobs = jobs.Count(j => j.Status == JobStatus.Failed);
+        var totalProcessedJobs = completedJobs + failedJobs;
+
+        if (totalProcessedJobs == bundle.TotalBacktests &&
+            (bundle.Status == BundleBacktestRequestStatus.Running || bundle.Status == BundleBacktestRequestStatus.Pending))
+        {
+            await HandleCompletedBundleAsync(bundle, completedJobs, failedJobs, backtestRepository);
+            return (StuckCount: 0, MissingJobsCount: 0, HealthyCount: 1);
+        }
+
         return (StuckCount: 0, MissingJobsCount: 0, HealthyCount: 1);
     }
@@ -471,6 +483,39 @@ public class BundleBacktestHealthCheckWorker : BackgroundService
             bundle.RequestId, bundle.Status);
     }

+    private async Task HandleCompletedBundleAsync(
+        BundleBacktestRequest bundle,
+        int completedJobs,
+        int failedJobs,
+        IBacktestRepository backtestRepository)
+    {
+        _logger.LogInformation(
+            "✅ Bundle {BundleRequestId} has all jobs finished ({Completed} completed, {Failed} failed) but bundle status was {OldStatus}. Updating to Completed.",
+            bundle.RequestId, completedJobs, failedJobs, bundle.Status);
+
+        // Update bundle status to Completed (or keep as Completed if it was already)
+        bundle.Status = failedJobs == 0 ? BundleBacktestRequestStatus.Completed : BundleBacktestRequestStatus.Completed;
+        bundle.CompletedBacktests = completedJobs;
+        bundle.FailedBacktests = failedJobs;
+        bundle.CompletedAt = DateTime.UtcNow;
+        bundle.UpdatedAt = DateTime.UtcNow;
+
+        if (failedJobs > 0)
+        {
+            bundle.ErrorMessage = $"{failedJobs} backtests failed";
+        }
+        else
+        {
+            bundle.ErrorMessage = null; // Clear any previous error message
+        }
+
+        await backtestRepository.UpdateBundleBacktestRequestAsync(bundle);
+
+        _logger.LogInformation(
+            "Successfully updated bundle {BundleRequestId} status to {Status} with {Completed}/{Total} backtests completed",
+            bundle.RequestId, bundle.Status, bundle.CompletedBacktests, bundle.TotalBacktests);
+    }
+
     private async Task HandleStalePendingBundleAsync(
         BundleBacktestRequest bundle,
         TimeSpan timeSinceCreation,

View File

@@ -74,11 +74,14 @@ public static class TradingBox
         Dictionary<string, LightSignal> previousSignal, IndicatorComboConfig config, int? loopbackPeriod,
         Dictionary<IndicatorType, IndicatorsResultBase> preCalculatedIndicatorValues)
     {
-        var signalOnCandles = new List<LightSignal>();
-        // Optimize list creation - avoid redundant allocations
-        var limitedCandles = newCandles.Count <= 600
-            ? newCandles.OrderBy(c => c.Date).ToList()
-            : newCandles.OrderBy(c => c.Date).TakeLast(600).ToList();
+        // Pre-allocate with estimated capacity to reduce reallocations
+        var signalOnCandles = new List<LightSignal>(Math.Min(newCandles.Count, 100));
+
+        // Optimize candle ordering - reuse existing sorted data when possible
+        var orderedCandles = newCandles.OrderBy(c => c.Date).ToList();
+        var limitedCandles = orderedCandles.Count <= 600
+            ? orderedCandles
+            : orderedCandles.GetRange(orderedCandles.Count - 600, 600);

         foreach (var indicator in lightScenario.Indicators)
         {
@@ -112,10 +115,9 @@
                 continue;
             }

-            // Ensure limitedCandles is ordered chronologically
-            var orderedCandles = limitedCandles.OrderBy(c => c.Date).ToList();
+            // Ensure limitedCandles is ordered chronologically (already ordered from previous step)
             var loopback = loopbackPeriod.HasValue && loopbackPeriod > 1 ? loopbackPeriod.Value : 1;
-            var candleLoopback = orderedCandles.TakeLast(loopback).ToList();
+            var candleLoopback = limitedCandles.TakeLast(loopback).ToList();

             if (!candleLoopback.Any())
             {

View File

@@ -16,6 +16,9 @@
     <None Update="Data\ETH-FifteenMinutes-candles-20:44:15 +00:00-.json">
       <CopyToOutputDirectory>Always</CopyToOutputDirectory>
     </None>
+    <None Update="performance-benchmarks.csv">
+      <CopyToOutputDirectory>Always</CopyToOutputDirectory>
+    </None>
   </ItemGroup>

   <ItemGroup>

View File

@@ -7,11 +7,12 @@
   },
   "WorkerBacktestCompute": true,
   "BacktestComputeWorker": {
-    "MaxConcurrentPerUser": 6,
-    "MaxConcurrentPerInstance": 6,
-    "JobPollIntervalSeconds": 5,
+    "MaxConcurrentPerUser": 8,
+    "MaxConcurrentPerInstance": 8,
+    "JobPollIntervalSeconds": 3,
     "HeartbeatIntervalSeconds": 30,
-    "StaleJobTimeoutMinutes": 5
+    "StaleJobTimeoutMinutes": 5,
+    "JobTimeoutMinutes": 45
   },
   "WorkerGeneticCompute": true,
   "GeneticComputeWorker": {