longPolling: add handleRetry configuration; support retry and handleRetry in the file-based configuration
The default handleRetry mode is `TIMEOUT_ONLY` (consistent with previous behavior).
ForteScarlet committed Sep 29, 2024
1 parent 3bbb681 commit 1b64c69
Showing 4 changed files with 107 additions and 14 deletions.
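
In short: LongPolling gains a mutable handleRetry property whose default, TIMEOUT_ONLY, preserves the pre-commit behavior. A minimal sketch of what that default is equivalent to, using only types visible in this diff (imports omitted):

    // Leaving handleRetry untouched is the same as setting it explicitly:
    val polling = LongPolling(timeout = 30).apply {
        handleRetry = LongPolling.HandleRetry(
            strategy = LongPolling.HandleRetryStrategy.TIMEOUT_ONLY, // default strategy
            delayMillis = 5000,                                      // default delay
        )
    }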
@@ -151,7 +151,7 @@ public inline fun getUpdateFlow(
     eachLimit: Int? = null,
     allowedUpdates: Collection<String>? = null,
     crossinline onEachResult: (List<Update>) -> List<Update> = { it },
-    crossinline onError: (Throwable) -> List<Update> = {
+    crossinline onError: suspend (Throwable) -> List<Update> = {
         if (it is HttpRequestTimeoutException) emptyList() else throw it
     },
     crossinline requestor: suspend (GetUpdatesApi) -> List<Update>
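
Because onError is now a suspend lambda, a caller-supplied handler can invoke suspending functions before the flow re-polls. A hedged sketch (this handler is illustrative, not part of the commit; the Update import from this library is omitted):

    import io.ktor.client.plugins.HttpRequestTimeoutException
    import kotlinx.coroutines.delay

    val onError: suspend (Throwable) -> List<Update> = { e ->
        when (e) {
            // A long-polling timeout is expected; return nothing and re-poll.
            is HttpRequestTimeoutException -> emptyList()
            else -> {
                delay(1000) // now legal inside the handler: back off, then resume
                emptyList()
            }
        }
    }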
@@ -68,12 +68,17 @@ public data class SerializableTelegramBotConfiguration(
     val limit: Int? = null,
     val timeout: Int? = BotConfiguration.DefaultLongPollingTimeout.inWholeSeconds.toInt(),
     val allowedUpdates: Collection<String>? = null,
+    val retry: LongPolling.Retry? = null,
+    val handleRetry: LongPolling.HandleRetry? = null,
 ) {
     public fun toBotLongPolling(): LongPolling = LongPolling(
         limit = limit,
         timeout = timeout,
         allowedUpdates = allowedUpdates,
-    )
+        retry = retry,
+    ).also {
+        this.handleRetry?.also { thisHr -> it.handleRetry = thisHr }
+    }
 }

 public fun toBotConfiguration(): TelegramBotConfiguration {
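
For the file-based configuration path named in the commit message, the keys should mirror the serializable property names above. A hedged sketch, since the surrounding schema is not shown in this diff (serializableLongPolling stands for a deserialized instance of the class above):

    // Hypothetical config fragment (key names assumed from the properties):
    //   "retry":       { "maxRetries": 3, "delayMillis": 5000 },
    //   "handleRetry": { "strategy": "ALL", "delayMillis": 3000 }
    //
    // toBotLongPolling() passes retry through the constructor, while
    // handleRetry, being a mutable property, is copied afterwards and only
    // when non-null, so omitting it keeps HandleRetry.DEFAULT (TIMEOUT_ONLY).
    val longPolling: LongPolling = serializableLongPolling.toBotLongPolling()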
@@ -358,6 +358,12 @@ public enum class SubscribeSequence {
  * The difference is that [timeout] defaults to [DefaultLongPollingTimeout]
  * instead of `null`.
  *
+ * @property retry Retry config. [retry] is based on the Ktor plugin
+ * [HttpRequestRetry][io.ktor.client.plugins.HttpRequestRetry] implementation.
+ *
+ * @property handleRetry Retry configuration for exception handling in the long-polling flow.
+ * Unlike [retry], [handleRetry] is based on `onError` in [getUpdateFlow].
+ *
  * @see getUpdateFlow
  */
 @OptIn(InternalSimbotAPI::class)
@@ -366,15 +372,45 @@ public data class LongPolling(
     val timeout: Int? = DefaultLongPollingTimeout.inWholeSeconds.toInt(),
     val allowedUpdates: Collection<String>? = null,
     // retry times on error
-    val retry: Retry? = null
+    val retry: Retry? = null,
 ) {
+    var handleRetry: HandleRetry = HandleRetry.DEFAULT

     @Serializable
     public data class Retry(
         val maxRetries: Int = 3,
         val delayMillis: Long = 5000,
         val isDelayMillisMultiplyByRetryTimes: Boolean = false
     )

+    @Serializable
+    public data class HandleRetry(
+        val strategy: HandleRetryStrategy = HandleRetryStrategy.TIMEOUT_ONLY,
+        val delayMillis: Long = 5000,
+    ) {
+        public companion object {
+            internal val DEFAULT: HandleRetry = HandleRetry()
+        }
+    }
+
+    public enum class HandleRetryStrategy {
+        /**
+         * Throw every exception, no retry.
+         */
+        NONE,
+
+        /**
+         * Catch [io.ktor.client.plugins.HttpRequestTimeoutException] only.
+         */
+        TIMEOUT_ONLY,
+
+        /**
+         * Always retry (except [CancellationException]).
+         */
+        ALL,
+    }
+
     // multiplyBy
 }
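
Constructing the three variants, with the behavior each selects restated from the KDoc above (a sketch; the values other than the defaults are arbitrary):

    // Fail fast: any error cancels the bot job.
    val none = LongPolling.HandleRetry(strategy = LongPolling.HandleRetryStrategy.NONE)

    // Default: only HttpRequestTimeoutException is swallowed and re-polled.
    val timeoutOnly = LongPolling.HandleRetry()

    // Survive flaky networks: everything except CancellationException is
    // logged and re-polled after delayMillis.
    val all = LongPolling.HandleRetry(
        strategy = LongPolling.HandleRetryStrategy.ALL,
        delayMillis = 3000,
    )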
@@ -294,6 +294,7 @@ internal class BotImpl(
         }
     }

+    @Suppress("ThrowsCount")
     private suspend fun HttpClient.longPolling(
         token: String,
         server: Url?,
@@ -309,25 +310,76 @@ internal class BotImpl(
         eachLimit = limit,
         allowedUpdates = allowedUpdates,
         onError = { error ->
-            when (error) {
-                is CancellationException -> {
-                    eventLogger.trace("Handle an cancellation error on long polling task: {}", error.message, error)
-                    // throw, and stop the job.
-                    throw error
-                }
-
-                is HttpRequestTimeoutException -> {
-                    eventLogger.trace("Handle an timeout error on long polling task: {}", error.message, error)
-                }
-
-                else -> {
-                    eventLogger.error("Handle an error on long polling task: {}", error.message, error)
-                    // throw to Bot
-                    job.cancel(CancellationException("LongPolling on failure", error))
-                    throw error
-                }
-            }
-
-            emptyList()
+            if (error is CancellationException) {
+                eventLogger.trace("Handle a cancellation error on long polling task: {}", error.message, error)
+                // throw, and stop the job.
+                throw error
+            }
+
+            val handleRetry = longPolling?.handleRetry ?: LongPolling.HandleRetry.DEFAULT
+            val strategy = handleRetry.strategy
+            val delay = handleRetry.delayMillis
+
+            when (strategy) {
+                LongPolling.HandleRetryStrategy.NONE -> {
+                    eventLogger.error(
+                        "Handle an error on long polling task " +
+                            "with handle retry strategy {}: {}, " +
+                            "bot will be shut down.",
+                        strategy,
+                        error.message,
+                        error
+                    )
+                    job.cancel(CancellationException("LongPolling on failure", error))
+                    throw error
+                }
+
+                LongPolling.HandleRetryStrategy.TIMEOUT_ONLY -> {
+                    if (error is HttpRequestTimeoutException) {
+                        eventLogger.debug(
+                            "Handle a timeout error " +
+                                "on long polling task: {}, just re-poll.",
+                            error.message,
+                            error
+                        )
+                    } else {
+                        eventLogger.error(
+                            "Handle an error on long polling task " +
+                                "with handle retry strategy {}: {}, " +
+                                "bot will be shut down.",
+                            strategy,
+                            error.message,
+                            error
+                        )
+                        // throw to Bot
+                        job.cancel(CancellationException("LongPolling on failure", error))
+                        throw error
+                    }
+                }
+
+                LongPolling.HandleRetryStrategy.ALL -> {
+                    if (error is HttpRequestTimeoutException) {
+                        eventLogger.debug(
+                            "Handle a timeout error " +
+                                "on long polling task: {}, just re-poll.",
+                            error.message,
+                            error
+                        )
+                    } else {
+                        eventLogger.debug(
+                            "Handle an error on long polling task " +
+                                "with handle retry strategy {}: {}, " +
+                                "retry in {} ms",
+                            strategy,
+                            error.message,
+                            delay,
+                            error
+                        )
+                    }
+                }
+            }
+
+            delay(delay)
+            emptyList()
         }
     ) { api ->
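
Distilled from the branching above, the decision the handler now makes, as a compact standalone sketch (simplified names; not the bot's actual API):

    import io.ktor.client.plugins.HttpRequestTimeoutException
    import kotlinx.coroutines.CancellationException

    // true  -> the loop delays handleRetry.delayMillis and re-polls;
    // false -> the error is rethrown and the bot job is cancelled.
    fun shouldRepoll(strategy: LongPolling.HandleRetryStrategy, error: Throwable): Boolean = when {
        error is CancellationException -> false                    // always propagated
        strategy == LongPolling.HandleRetryStrategy.NONE -> false  // fail fast
        strategy == LongPolling.HandleRetryStrategy.TIMEOUT_ONLY ->
            error is HttpRequestTimeoutException                   // pre-commit behavior
        else -> true                                               // ALL: retry anything else
    }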
