Make TransactionalProducer implementation work #196

Open · wants to merge 5 commits into base: dev
TransactionalIntegrationTests.cs

@@ -8,6 +8,7 @@
 using Akka.Streams.Kafka.Helpers;
 using Akka.Streams.Kafka.Messages;
 using Akka.Streams.Kafka.Settings;
+using Akka.Streams.TestKit;
 using Confluent.Kafka;
 using FluentAssertions;
 using Xunit;
@@ -22,42 +23,42 @@ public TransactionalIntegrationTests(ITestOutputHelper output, KafkaFixture fixt
         {
         }

-        [Fact(Skip = "Missing producer transactions support, see https://github.com/akkadotnet/Akka.Streams.Kafka/issues/85")]
+        [Fact]
         public async Task Transactional_source_with_sink_Should_work()
         {
-            var settings = CreateConsumerSettings<string>(CreateGroup(1));
+            var consumerSettings = CreateConsumerSettings<string>(CreateGroup(1));
             var sourceTopic = CreateTopic(1);
             var targetTopic = CreateTopic(2);
             var transactionalId = Guid.NewGuid().ToString();
             const int totalMessages = 10;

-            var control = KafkaConsumer.TransactionalSource(settings, Subscriptions.Topics(sourceTopic))
-                .Via(Business<TransactionalMessage<Null, string>>())
+            await ProduceStrings(sourceTopic, Enumerable.Range(1, totalMessages), ProducerSettings);
+
+            var control = KafkaConsumer.TransactionalSource(consumerSettings, Subscriptions.Topics(sourceTopic))
                 .Select(message =>
                 {
                     return ProducerMessage.Single(
-                        new ProducerRecord<Null, string>(targetTopic, message.Record.Key, message.Record.Value),
+                        new ProducerRecord<Null, string>(targetTopic, message.Record.Message.Key, message.Record.Message.Value),
                         passThrough: message.PartitionOffset);
                 })
                 .ToMaterialized(KafkaProducer.TransactionalSink(ProducerSettings, transactionalId), Keep.Both)
                 .MapMaterializedValue(DrainingControl<NotUsed>.Create)
                 .Run(Materializer);

-            var consumer = ConsumeStrings(targetTopic, totalMessages);
+            var consumer = ConsumeStrings(targetTopic, totalMessages, CreateConsumerSettings<Null, string>(CreateGroup(2)));

-            await ProduceStrings(sourceTopic, Enumerable.Range(1, totalMessages), ProducerSettings);
-
-            AssertTaskCompletesWithin(TimeSpan.FromSeconds(totalMessages), consumer.IsShutdown);
-            AssertTaskCompletesWithin(TimeSpan.FromSeconds(totalMessages), control.DrainAndShutdown());
+            AssertTaskCompletesWithin(TimeSpan.FromSeconds(30), consumer.IsShutdown);
+            AssertTaskCompletesWithin(TimeSpan.FromSeconds(30), control.DrainAndShutdown());

             consumer.DrainAndShutdown().Result.Should().HaveCount(totalMessages);
         }

-        private Flow<T, T, NotUsed> Business<T>() => Flow.Create<T>();
-
-        private DrainingControl<IImmutableList<ConsumeResult<Null, string>>> ConsumeStrings(string topic, int count)
+        private DrainingControl<IImmutableList<ConsumeResult<Null, string>>> ConsumeStrings(
+            string topic,
+            int count,
+            ConsumerSettings<Null, string> settings)
         {
-            return KafkaConsumer.PlainSource(CreateConsumerSettings<string>(CreateGroup(1)), Subscriptions.Topics(topic))
+            return KafkaConsumer.PlainSource(settings, Subscriptions.Topics(topic))
                 .Take(count)
                 .ToMaterialized(Sink.Seq<ConsumeResult<Null, string>>(), Keep.Both)
                 .MapMaterializedValue(DrainingControl<IImmutableList<ConsumeResult<Null, string>>>.Create)
src/Akka.Streams.Kafka/Dsl/KafkaProducer.cs (0 additions, 6 deletions)

@@ -211,13 +211,10 @@ public static FlowWithContext<C, IEnvelope<K, V, NotUsed>, C, IResults<K, V, C>,
         }

         /// <summary>
-        /// API IS FOR INTERNAL USAGE: see https://github.com/akkadotnet/Akka.Streams.Kafka/issues/85
-        ///
         /// Publish records to Kafka topics and then continue the flow. The flow can only be used with a <see cref="KafkaConsumer.TransactionalSource{K,V}"/> that
         /// emits a <see cref="TransactionalMessage{K,V}"/>. The flow requires a unique `transactional.id` across all app
         /// instances. The flow will override producer properties to enable Kafka exactly-once transactional support.
         /// </summary>
-        [InternalApi]
         public static Flow<IEnvelope<K, V, GroupTopicPartitionOffset>, IResults<K, V, GroupTopicPartitionOffset>, NotUsed> TransactionalFlow<K, V>(
             ProducerSettings<K, V> setting,
             string transactionalId)
@@ -237,12 +234,9 @@ public static Flow<IEnvelope<K, V, GroupTopicPartitionOffset>, IResults<K, V, Gr
         }

         /// <summary>
-        /// API IS FOR INTERNAL USAGE: see https://github.com/akkadotnet/Akka.Streams.Kafka/issues/85
-        ///
         /// Sink that is aware of the <see cref="TransactionalMessage{K,V}.PartitionOffset"/> from a <see cref="KafkaConsumer.TransactionalSource{K,V}"/>.
         /// It will initialize, begin, produce, and commit the consumer offset as part of a transaction.
        /// </summary>
-        [InternalApi]
        public static Sink<IEnvelope<K, V, GroupTopicPartitionOffset>, Task> TransactionalSink<K, V>(
            ProducerSettings<K, V> settings,
            string transactionalId)
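
Note: with the [InternalApi] attributes removed, TransactionalFlow and TransactionalSink become public API. A minimal consume-transform-produce sketch, modeled directly on the integration test above (the consumerSettings/producerSettings/materializer values and topic names are placeholders):

    var transactionalId = Guid.NewGuid().ToString();
    var control = KafkaConsumer.TransactionalSource(consumerSettings, Subscriptions.Topics("source-topic"))
        .Select(message => ProducerMessage.Single(
            new ProducerRecord<Null, string>("sink-topic", message.Record.Message.Key, message.Record.Message.Value),
            // the consumed offset travels with the record and is committed inside the transaction
            passThrough: message.PartitionOffset))
        .ToMaterialized(KafkaProducer.TransactionalSink(producerSettings, transactionalId), Keep.Both)
        .MapMaterializedValue(DrainingControl<NotUsed>.Create)
        .Run(materializer);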
src/Akka.Streams.Kafka/Messages/CommittedMarker.cs (18 additions, 2 deletions)

@@ -30,17 +30,33 @@ internal sealed class PartitionOffsetCommittedMarker : GroupTopicPartitionOffset
         /// Committed marker
         /// </summary>
         public ICommittedMarker CommittedMarker { get; }
+        /// <summary>
+        /// Consumer group metadata
+        /// </summary>
+        public IConsumerGroupMetadata ConsumerGroupMetadata { get; }

-        public PartitionOffsetCommittedMarker(string groupId, string topic, int partition, Offset offset, ICommittedMarker committedMarker)
+        public PartitionOffsetCommittedMarker(
+            string groupId,
+            string topic,
+            int partition,
+            Offset offset,
+            ICommittedMarker committedMarker,
+            IConsumerGroupMetadata consumerGroupMetadata)
             : base(groupId, topic, partition, offset)
         {
             CommittedMarker = committedMarker;
+            ConsumerGroupMetadata = consumerGroupMetadata;
         }

-        public PartitionOffsetCommittedMarker(GroupTopicPartition groupTopicPartition, Offset offset, ICommittedMarker committedMarker)
+        public PartitionOffsetCommittedMarker(
+            GroupTopicPartition groupTopicPartition,
+            Offset offset,
+            ICommittedMarker committedMarker,
+            IConsumerGroupMetadata consumerGroupMetadata)
             : base(groupTopicPartition, offset)
         {
             CommittedMarker = committedMarker;
+            ConsumerGroupMetadata = consumerGroupMetadata;
         }
     }
 }
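
Why the marker now carries the metadata: with Confluent.Kafka (librdkafka 1.4+), committing consumer offsets as part of a producer transaction requires the consumer group metadata rather than just the group id. A self-contained sketch of the underlying call the transactional producer has to make (the class and method names here are illustrative, not part of this PR):

    using System;
    using System.Collections.Generic;
    using Confluent.Kafka;

    static class TransactionalCommitSketch
    {
        // Illustrative only: shows why IConsumerGroupMetadata must travel with each offset.
        public static void CommitOffsetsInTransaction(
            IProducer<Null, string> producer,
            IEnumerable<TopicPartitionOffset> offsets,
            IConsumerGroupMetadata groupMetadata)
        {
            // Offsets are committed through the producer, inside the open transaction,
            // so they become visible atomically with the produced records.
            producer.SendOffsetsToTransaction(offsets, groupMetadata, TimeSpan.FromSeconds(10));
            producer.CommitTransaction(TimeSpan.FromSeconds(10));
        }
    }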
src/Akka.Streams.Kafka/Settings/ProducerSettings.cs (18 additions, 3 deletions)

@@ -9,23 +9,35 @@ namespace Akka.Streams.Kafka.Settings
 {
     public sealed class ProducerSettings<TKey, TValue>
     {
-        public ProducerSettings(ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer, int parallelism,
-            string dispatcherId, TimeSpan flushTimeout, TimeSpan eosCommitInterval,
-            IImmutableDictionary<string, string> properties)
+        public ProducerSettings(
+            ISerializer<TKey> keySerializer,
+            ISerializer<TValue> valueSerializer,
+            int parallelism,
+            string dispatcherId,
+            TimeSpan flushTimeout,
+            TimeSpan eosCommitInterval,
+            TimeSpan maxBlock,
+            IImmutableDictionary<string, string> properties)
         {
             KeySerializer = keySerializer;
             ValueSerializer = valueSerializer;
             Parallelism = parallelism;
             DispatcherId = dispatcherId;
             FlushTimeout = flushTimeout;
             EosCommitInterval = eosCommitInterval;
+            MaxBlock = maxBlock;
             Properties = properties;
         }

         public ISerializer<TKey> KeySerializer { get; }
         public ISerializer<TValue> ValueSerializer { get; }
         public int Parallelism { get; }
         public string DispatcherId { get; }
+        /// <summary>
+        /// Configures how long producer methods can block.
+        /// See also: https://docs.confluent.io/platform/current/installation/configuration/producer-configs.html#producerconfigs_max.block.ms
+        /// </summary>
+        public TimeSpan MaxBlock { get; }
         public TimeSpan FlushTimeout { get; }
         /// <summary>
         /// The time interval to commit a transaction when using the `Transactional.sink` or `Transactional.flow`.
@@ -69,6 +81,7 @@ private ProducerSettings<TKey, TValue> Copy(
             string dispatcherId = null,
             TimeSpan? flushTimeout = null,
             TimeSpan? eosCommitInterval = null,
+            TimeSpan? maxBlock = null,
             IImmutableDictionary<string, string> properties = null) =>
             new ProducerSettings<TKey, TValue>(
                 keySerializer: keySerializer ?? this.KeySerializer,
@@ -77,6 +90,7 @@ private ProducerSettings<TKey, TValue> Copy(
                 dispatcherId: dispatcherId ?? this.DispatcherId,
                 flushTimeout: flushTimeout ?? this.FlushTimeout,
                 eosCommitInterval: eosCommitInterval ?? this.EosCommitInterval,
+                maxBlock: maxBlock ?? this.MaxBlock,
                 properties: properties ?? this.Properties);

         public static ProducerSettings<TKey, TValue> Create(ActorSystem system, ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer)
@@ -98,6 +112,7 @@ public static ProducerSettings<TKey, TValue> Create(Akka.Configuration.Config co
                 dispatcherId: config.GetString("use-dispatcher", "akka.kafka.default-dispatcher"),
                 flushTimeout: config.GetTimeSpan("flush-timeout", TimeSpan.FromSeconds(2)),
                 eosCommitInterval: config.GetTimeSpan("eos-commit-interval", TimeSpan.FromMilliseconds(100)),
+                maxBlock: config.GetTimeSpan("max.block.ms", TimeSpan.FromSeconds(60)),
                 properties: ImmutableDictionary<string, string>.Empty);
         }
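
How the new setting surfaces to callers, as a rough sketch (assumes Confluent.Kafka's built-in Serializers helpers and the library's existing WithBootstrapServers method; this is not code from the PR):

    var producerSettings = ProducerSettings<Null, string>
        .Create(system, Serializers.Null, Serializers.Utf8)
        .WithBootstrapServers("localhost:9092");

    // MaxBlock mirrors the Kafka producer's max.block.ms: an upper bound on how long
    // blocking producer calls (transaction init/begin/commit among them) may wait.
    // It falls back to 60 seconds when "max.block.ms" is absent from the config;
    // EosCommitInterval similarly controls how often a transaction is committed.
    TimeSpan maxBlock = producerSettings.MaxBlock;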
BaseSingleSourceLogic.cs

@@ -35,6 +35,7 @@ internal abstract class BaseSingleSourceLogic<K, V, TMessage> : GraphStageLogic

         private readonly ConcurrentQueue<ConsumeResult<K, V>> _buffer = new ConcurrentQueue<ConsumeResult<K, V>>();
         protected IImmutableSet<TopicPartition> TopicPartitions { get; set; } = ImmutableHashSet.Create<TopicPartition>();
+        public IConsumerGroupMetadata ConsumerGroupMetadata { get; private set; }

         protected StageActor SourceActor { get; private set; }
         internal IActorRef ConsumerActor { get; private set; }
@@ -66,6 +67,9 @@ public override void PreStart()
             ConsumerActor = CreateConsumerActor();
             SourceActor.Watch(ConsumerActor);

+            // get consumer metadata before consuming messages
+            ConsumerActor.Tell(KafkaConsumerActorMetadata.Internal.ConsumerGroupMetadataRequest.Instance, SourceActor.Ref);
+
             ConfigureSubscription();
         }

@@ -109,6 +113,10 @@ protected virtual void MessageHandling((IActorRef, object) args)
         {
             switch (args.Item2)
             {
+                case KafkaConsumerActorMetadata.Internal.ConsumerGroupMetadata metadata:
+                    ConsumerGroupMetadata = metadata.Metadata;
+                    break;
+
                 case KafkaConsumerActorMetadata.Internal.Messages<K, V> msg:
                     // might be more than one in flight when we assign/revoke tps
                     if (msg.RequestId == _requestId)
@@ -150,7 +158,8 @@ private void Pump()
         {
             if (_buffer.TryDequeue(out var message))
             {
-                Push(_shape.Outlet, _messageBuilder.CreateMessage(message));
+                var result = _messageBuilder.CreateMessage(message, ConsumerGroupMetadata);
+                Push(_shape.Outlet, result);
                 Pump();
             }
             else if (!_requested && TopicPartitions.Any())
src/Akka.Streams.Kafka/Stages/Consumers/Abstract/SubSourceLogic.cs (30 additions, 11 deletions)

@@ -62,6 +62,7 @@ private class CloseRevokedPartitions { }
         /// </summary>
         private IImmutableSet<TopicPartition> _partitionsToRevoke = ImmutableHashSet<TopicPartition>.Empty;

+        public IConsumerGroupMetadata ConsumerGroupMetadata { get; private set; }

         protected StageActor SourceActor { get; private set; }
         public IActorRef ConsumerActor { get; private set; }
@@ -107,6 +108,10 @@ public override void PreStart()
         {
             switch (args.Item2)
             {
+                case KafkaConsumerActorMetadata.Internal.ConsumerGroupMetadata metadata:
+                    ConsumerGroupMetadata = metadata.Metadata;
+                    break;
+
                 case Status.Failure failure:
                     var exception = failure.Cause;
                     switch (_decider(failure.Cause))
@@ -140,6 +145,8 @@ public override void PreStart()
                 $"kafka-consumer-{_actorNumber}");

             SourceActor.Watch(ConsumerActor);
+
+            ConsumerActor.Tell(KafkaConsumerActorMetadata.Internal.ConsumerGroupMetadataRequest.Instance, SourceActor.Ref);

             switch (_subscription)
             {
@@ -285,8 +292,11 @@ private void EmitSubSourcesForPendingPartitions()
                 _pendingPartitions = _pendingPartitions.Remove(topicPartition);
                 _partitionsInStartup = _partitionsInStartup.Add(topicPartition);

-                var subSourceStage = new SubSourceStreamStage<K, V, TMessage>(topicPartition, ConsumerActor, _subsourceStartedCallback,
-                    _subsourceCancelledCallback, _messageBuilder, _decider, _actorNumber);
+                var subSourceStage = new SubSourceStreamStage<K, V, TMessage>(
+                    topicPartition, ConsumerActor, ConsumerGroupMetadata,
+                    _subsourceStartedCallback, _subsourceCancelledCallback,
+                    _messageBuilder, _decider, _actorNumber);
+
                 var subsource = Source.FromGraph(subSourceStage);

                 Push(_shape.Outlet, (topicPartition, subsource));
@@ -359,6 +369,7 @@ private class SubSourceStreamStage<K, V, TMsg> : GraphStage<SourceShape<TMsg>>
             private readonly TopicPartition _topicPartition;
             private readonly IActorRef _consumerActor;
             private readonly Action<(TopicPartition, IControl)> _subSourceStartedCallback;
+            private readonly IConsumerGroupMetadata _consumerGroupMetadata;
             private readonly Action<(TopicPartition, Option<ConsumeResult<K, V>>)> _subSourceCancelledCallback;
             private readonly IMessageBuilder<K, V, TMsg> _messageBuilder;
             private readonly int _actorNumber;
@@ -367,16 +378,20 @@ private class SubSourceStreamStage<K, V, TMsg> : GraphStage<SourceShape<TMsg>>
             public Outlet<TMsg> Out { get; }
             public override SourceShape<TMsg> Shape { get; }

-            public SubSourceStreamStage(TopicPartition topicPartition, IActorRef consumerActor,
-                Action<(TopicPartition, IControl)> subSourceStartedCallback,
-                Action<(TopicPartition, Option<ConsumeResult<K, V>>)> subSourceCancelledCallback,
-                IMessageBuilder<K, V, TMsg> messageBuilder,
-                Decider decider,
-                int actorNumber)
+            public SubSourceStreamStage(
+                TopicPartition topicPartition,
+                IActorRef consumerActor,
+                IConsumerGroupMetadata consumerGroupMetadata,
+                Action<(TopicPartition, IControl)> subSourceStartedCallback,
+                Action<(TopicPartition, Option<ConsumeResult<K, V>>)> subSourceCancelledCallback,
+                IMessageBuilder<K, V, TMsg> messageBuilder,
+                Decider decider,
+                int actorNumber)
             {
                 _topicPartition = topicPartition;
                 _consumerActor = consumerActor;
                 _subSourceStartedCallback = subSourceStartedCallback;
+                _consumerGroupMetadata = consumerGroupMetadata;
                 _subSourceCancelledCallback = subSourceCancelledCallback;
                 _messageBuilder = messageBuilder;
                 _decider = decider;
@@ -388,8 +403,9 @@ public SubSourceStreamStage(TopicPartition topicPartition, IActorRef consumerAct

             protected override GraphStageLogic CreateLogic(Attributes inheritedAttributes)
             {
-                return new SubSourceStreamStageLogic(Shape, _topicPartition, _consumerActor, _actorNumber, _messageBuilder, _decider,
-                    _subSourceStartedCallback, _subSourceCancelledCallback);
+                return new SubSourceStreamStageLogic(
+                    Shape, _topicPartition, _consumerActor, _actorNumber, _messageBuilder, _decider,
+                    _consumerGroupMetadata, _subSourceStartedCallback, _subSourceCancelledCallback);
             }

             private class SubSourceStreamStageLogic : GraphStageLogic
@@ -404,12 +420,14 @@ private class SubSourceStreamStageLogic : GraphStageLogic
                 private bool _requested = false;
                 private StageActor _subSourceActor;
                 private readonly Decider _decider;
+                private readonly IConsumerGroupMetadata _consumerGroupMetadata;
                 private readonly ConcurrentQueue<ConsumeResult<K, V>> _buffer = new ConcurrentQueue<ConsumeResult<K, V>>();

                 public PromiseControl<TMsg> Control { get; }

                 public SubSourceStreamStageLogic(SourceShape<TMsg> shape, TopicPartition topicPartition, IActorRef consumerActor,
                     int actorNumber, IMessageBuilder<K, V, TMsg> messageBuilder, Decider decider,
+                    IConsumerGroupMetadata consumerGroupMetadata,
                     Action<(TopicPartition, IControl)> subSourceStartedCallback,
                     Action<(TopicPartition, Option<ConsumeResult<K, V>>)> subSourceCancelledCallback)
                     : base(shape)
@@ -420,6 +438,7 @@ public SubSourceStreamStageLogic(SourceShape<TMsg> shape, TopicPartition topicPa
                     _actorNumber = actorNumber;
                     _messageBuilder = messageBuilder;
                     _decider = decider;
+                    _consumerGroupMetadata = consumerGroupMetadata;
                     _subSourceStartedCallback = subSourceStartedCallback;
                     _requestMessages = new KafkaConsumerActorMetadata.Internal.RequestMessages(0, ImmutableHashSet.Create(topicPartition));

@@ -493,7 +512,7 @@ private void Pump()
                 {
                     if (_buffer.TryDequeue(out var message))
                     {
-                        Push(_shape.Outlet, _messageBuilder.CreateMessage(message));
+                        Push(_shape.Outlet, _messageBuilder.CreateMessage(message, _consumerGroupMetadata));
                        Pump();
                    }
                    else if (!_requested)
@@ -119,7 +119,8 @@ private void DrainHandling((IActorRef, object) arg, Action<(IActorRef, object)>
         }

         /// <inheritdoc />
-        public TransactionalMessage<K, V> CreateMessage(ConsumeResult<K, V> record) => _messageBuilder.CreateMessage(record);
+        public TransactionalMessage<K, V> CreateMessage(ConsumeResult<K, V> record, IConsumerGroupMetadata consumerGroupMetadata)
+            => _messageBuilder.CreateMessage(record, ConsumerGroupMetadata);

         /// <inheritdoc />
         public string GroupId => _settings.GroupId;