diff --git a/docs/migration-v8.md b/docs/migration-v8.md
index d8aa72f86e2..175d332de9d 100644
--- a/docs/migration-v8.md
+++ b/docs/migration-v8.md
@@ -186,7 +186,7 @@ ResiliencePipeline pipeline = new ResiliencePipelineBuilder()
 
 > [!IMPORTANT]
-> In v7, the policy wrap ordering is different; the policy added first was executed last (FILO). In v8, the execution order matches the order in which they were added (FIFO).
+> In v7, the policy wrap ordering is different; the policy added first was executed last (FILO). In v8, the execution order matches the order in which they were added (FIFO). See [fallback after retries](strategies/fallback.md#fallback-after-retries) for an example of how the strategies are executed.
 
 ## Migrating retry policies
 
diff --git a/docs/strategies/circuit-breaker.md b/docs/strategies/circuit-breaker.md
index 7e31bf926c5..cc01e099f00 100644
--- a/docs/strategies/circuit-breaker.md
+++ b/docs/strategies/circuit-breaker.md
@@ -97,7 +97,7 @@ await manualControl.CloseAsync();
 - [Circuit Breaker Pattern by Microsoft](https://msdn.microsoft.com/en-us/library/dn589784.aspx)
 - [Original Circuit Breaking Article](https://web.archive.org/web/20160106203951/http://thatextramile.be/blog/2008/05/the-circuit-breaker)
 
-## Patterns and anti-patterns
+## Anti-patterns
 
 Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and those to avoid.
 
@@ -263,7 +263,7 @@ circuitBreaker = new ResiliencePipelineBuilder()
 
 ✅ DO
 
-The `CircuitBreakerStartegyOptions` currently do not support defining break durations dynamically. This may be re-evaluated in the future. For now, refer to the first example for a potential workaround. However, please use it with caution.
+The `CircuitBreakerStrategyOptions` currently do not support defining break durations dynamically. This may be re-evaluated in the future. For now, refer to the first example for a potential workaround. However, please use it with caution.
 
 ### 3 - Wrapping each endpoint with a circuit breaker
 
diff --git a/docs/strategies/fallback.md b/docs/strategies/fallback.md
index f01f46f1d52..11cbae5e0c9 100644
--- a/docs/strategies/fallback.md
+++ b/docs/strategies/fallback.md
@@ -65,7 +65,54 @@ new ResiliencePipelineBuilder()
 | `FallbackAction` | `Null`, **Required** | Fallback action to be executed. |
 | `OnFallback` | `null` | Event that is raised when fallback happens. |
 
-## Patterns and anti-patterns
+## Patterns
+
+### Fallback after retries
+
+When designing resilient systems, a common pattern is to use a fallback after multiple failed retry attempts. This approach is especially relevant when a fallback strategy can provide a sensible default value.
+
+
+```cs
+// Define common predicates re-used by both the fallback and retry strategies
+var predicateBuilder = new PredicateBuilder<HttpResponseMessage>()
+    .Handle<HttpRequestException>()
+    .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError);
+
+var pipeline = new ResiliencePipelineBuilder<HttpResponseMessage>()
+    .AddFallback(new()
+    {
+        ShouldHandle = predicateBuilder,
+        FallbackAction = args =>
+        {
+            // Try to resolve the fallback response
+            HttpResponseMessage fallbackResponse = ResolveFallbackResponse(args.Outcome);
+
+            return Outcome.FromResultAsValueTask(fallbackResponse);
+        }
+    })
+    .AddRetry(new()
+    {
+        ShouldHandle = predicateBuilder,
+        MaxRetryAttempts = 3,
+    })
+    .Build();
+
+// Demonstrative execution that always produces an invalid result
+pipeline.Execute(() => new HttpResponseMessage(HttpStatusCode.InternalServerError));
+```
+
+
+Here's a breakdown of the behavior when the callback either returns an `HttpStatusCode.InternalServerError` response or throws an `HttpRequestException`:
+
+- The fallback strategy starts its execution and immediately passes the provided callback on to the retry strategy.
+- The retry strategy executes the callback, makes 3 retry attempts, and yields an outcome that represents an error.
+- The fallback strategy resumes execution, assesses the outcome generated by the callback, and, if necessary, supplies the fallback value.
+- The fallback strategy completes its execution.
+
+> [!NOTE]
+> The preceding example also demonstrates how to re-use the same `PredicateBuilder` across multiple strategies.
+
+## Anti-patterns
 
 Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and ones to avoid.
 
diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md
index ddaa5fabafb..330bf91d682 100644
--- a/docs/strategies/retry.md
+++ b/docs/strategies/retry.md
@@ -104,7 +104,45 @@ new ResiliencePipelineBuilder().AddRetry(new RetryStrategyOptions
 | `OnRetry` | `null` | Action executed when retry occurs. |
 | `MaxDelay` | `null` | Caps the calculated retry delay to a specified maximum duration. |
 
-## Patterns and anti-patterns
+## Patterns
+
+### Limiting the maximum delay
+
+In some cases, you might want to set a limit on the calculated delay. This is beneficial when multiple retries are anticipated, and you wish to prevent excessive wait times between these retries.
+
+Consider the following example of a long-running background job:
+
+
+```cs
+ResiliencePipeline pipeline = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
+        Delay = TimeSpan.FromSeconds(2),
+        MaxRetryAttempts = int.MaxValue,
+        BackoffType = DelayBackoffType.Exponential,
+
+        // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes.
+        MaxDelay = TimeSpan.FromMinutes(15),
+        UseJitter = true
+    })
+    .Build();
+
+// Background processing
+while (!cancellationToken.IsCancellationRequested)
+{
+    await pipeline.ExecuteAsync(async token =>
+    {
+        // In the event of a prolonged service outage, we can afford to wait for a successful retry since this is a background task.
+        await SynchronizeDataAsync(token);
+    },
+    cancellationToken);
+
+    await Task.Delay(TimeSpan.FromMinutes(30)); // The sync runs every 30 minutes.
+}
+```
+
+
+## Anti-patterns
 
 Over the years, many developers have used Polly in various ways. Some of these recurring patterns may not be ideal. This section highlights the recommended practices and those to avoid.
 
@@ -480,38 +518,3 @@ var retry = new ResiliencePipelineBuilder()
 
 **Reasoning**: As previously mentioned, always use the designated area to define retry conditions. Re-frame your original exit conditions to specify when a retry should be initiated.
-
-### Limiting the maximum delay
-
-In some cases, you might want to set a limit on the calculated delay. This is beneficial when multiple retries are anticipated, and you wish to prevent excessive wait times between these retries.
-
-Consider the following example of a long-running background job:
-
-
-```cs
-ResiliencePipeline pipeline = new ResiliencePipelineBuilder()
-    .AddRetry(new()
-    {
-        Delay = TimeSpan.FromSeconds(2),
-        MaxRetryAttempts = int.MaxValue,
-
-        // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes.
-        MaxDelay = TimeSpan.FromMinutes(15),
-        UseJitter = true
-    })
-    .Build();
-
-// Background processing
-while (!cancellationToken.IsCancellationRequested)
-{
-    await pipeline.ExecuteAsync(async token =>
-    {
-        // In the event of a prolonged service outage, we can afford to wait for a successful retry since this is a background task.
-        await SynchronizeDataAsync(token);
-    },
-    cancellationToken);
-
-    await Task.Delay(TimeSpan.FromMinutes(30)); // The sync runs every 30 minutes.
-}
-```
-
diff --git a/src/Snippets/Docs/Fallback.cs b/src/Snippets/Docs/Fallback.cs
index 254db8127d7..74ead75f911 100644
--- a/src/Snippets/Docs/Fallback.cs
+++ b/src/Snippets/Docs/Fallback.cs
@@ -232,4 +232,40 @@ private static ValueTask ActionCore()
         return await pipeline.ExecuteAsync(CallExternalSystem, CancellationToken.None);
         #endregion
     }
+
+    public static void FallbackAfterRetries()
+    {
+        #region fallback-after-retries
+
+        // Define common predicates re-used by both the fallback and retry strategies
+        var predicateBuilder = new PredicateBuilder<HttpResponseMessage>()
+            .Handle<HttpRequestException>()
+            .HandleResult(r => r.StatusCode == HttpStatusCode.InternalServerError);
+
+        var pipeline = new ResiliencePipelineBuilder<HttpResponseMessage>()
+            .AddFallback(new()
+            {
+                ShouldHandle = predicateBuilder,
+                FallbackAction = args =>
+                {
+                    // Try to resolve the fallback response
+                    HttpResponseMessage fallbackResponse = ResolveFallbackResponse(args.Outcome);
+
+                    return Outcome.FromResultAsValueTask(fallbackResponse);
+                }
+            })
+            .AddRetry(new()
+            {
+                ShouldHandle = predicateBuilder,
+                MaxRetryAttempts = 3,
+            })
+            .Build();
+
+        // Demonstrative execution that always produces an invalid result
+        pipeline.Execute(() => new HttpResponseMessage(HttpStatusCode.InternalServerError));
+
+        #endregion
+    }
+
+    private static HttpResponseMessage ResolveFallbackResponse(Outcome<HttpResponseMessage> outcome) => new();
 }
diff --git a/src/Snippets/Docs/Retry.cs b/src/Snippets/Docs/Retry.cs
index cd74bb5da7d..fc3d4cecb65 100644
--- a/src/Snippets/Docs/Retry.cs
+++ b/src/Snippets/Docs/Retry.cs
@@ -120,6 +120,7 @@ public static async Task MaxDelay()
         {
             Delay = TimeSpan.FromSeconds(2),
             MaxRetryAttempts = int.MaxValue,
+            BackoffType = DelayBackoffType.Exponential,
 
             // Initially, we aim for an exponential backoff, but after a certain number of retries, we set a maximum delay of 15 minutes.
             MaxDelay = TimeSpan.FromMinutes(15),
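
Below is a minimal, self-contained sketch illustrating the v8 ordering that the `migration-v8.md` note describes: the strategy added first becomes the outermost one (FIFO), whereas v7 executed the first-added policy last (FILO). It is not taken from this PR; the timeout value, retry settings, and the `Task.Delay` stand-in callback are illustrative assumptions.

```cs
using System;
using System.Threading;
using System.Threading.Tasks;
using Polly;
using Polly.Retry;

// The strategy added first (timeout) wraps the strategy added second (retry),
// so the 10-second timeout caps the whole execution, retries included.
ResiliencePipeline pipeline = new ResiliencePipelineBuilder()
    .AddTimeout(TimeSpan.FromSeconds(10)) // added first => outermost
    .AddRetry(new RetryStrategyOptions    // added second => runs inside the timeout
    {
        MaxRetryAttempts = 3,
        Delay = TimeSpan.FromSeconds(1),
    })
    .Build();

// The callback is the innermost part of the execution.
await pipeline.ExecuteAsync(
    async token => await Task.Delay(TimeSpan.FromMilliseconds(100), token),
    CancellationToken.None);
```

Under the v7 FILO ordering described in the note, the first-added timeout would instead end up innermost and act as a per-attempt timeout; that reversal is what the migration note asks you to verify when porting policy wraps.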