diff --git a/docs/index.html b/docs/index.html
index 097f8c74..09d39911 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -190,7 +190,7 @@
And we'll define some customer records to be written:
import org.apache.kafka.clients.producer.ProducerRecord
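The record definitions themselves fall outside this hunk. A minimal sketch of the shape they take, assuming Customer and CustomerId are plain case classes and using a hypothetical topic name (the field names here are illustrative, not the document's actual schema):
case class CustomerId(id: String)
case class Customer(name: String, address: String)

val topicName = "customers" // hypothetical topic name
val recordsToBeWritten = List(
  new ProducerRecord(topicName, CustomerId("1"), Customer("name-1", "address-1")),
  new ProducerRecord(topicName, CustomerId("2"), Customer("name-2", "address-2"))
)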
@@ -221,16 +221,16 @@ Writing typed records with an Avro4s producer
Turning a generic producer into a typed producer is simple. We first ensure that com.sksamuel.avro4s.RecordFormat
instances for our data are in scope:
implicit val CustomerRecordFormat = com.sksamuel.avro4s.RecordFormat[Customer]
-// CustomerRecordFormat: com.sksamuel.avro4s.RecordFormat[Customer] = com.sksamuel.avro4s.RecordFormat$$anon$1@199ded2
+// CustomerRecordFormat: com.sksamuel.avro4s.RecordFormat[Customer] = com.sksamuel.avro4s.RecordFormat$$anon$1@5787b7cd
implicit val CustomerIdRecordFormat = com.sksamuel.avro4s.RecordFormat[CustomerId]
-// CustomerIdRecordFormat: com.sksamuel.avro4s.RecordFormat[CustomerId] = com.sksamuel.avro4s.RecordFormat$$anon$1@1ed4cf89
+// CustomerIdRecordFormat: com.sksamuel.avro4s.RecordFormat[CustomerId] = com.sksamuel.avro4s.RecordFormat$$anon$1@44b18309
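RecordFormat is avro4s's two-way converter between case classes and Avro records; its to and from methods are presumably what toAvro4s relies on below. A quick round-trip illustration (the Customer value is hypothetical):
val avroRecord = CustomerRecordFormat.to(Customer("name-1", "address-1"))
// avroRecord is an Avro GenericRecord with name and address fields
val customer = CustomerRecordFormat.from(avroRecord)
// customer == Customer("name-1", "address-1")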
And with those implicits in scope, we can create our producer:
val avro4sProducer = producer.map(_.toAvro4s[CustomerId, Customer])
// avro4sProducer: Resource[IO, ProducerApi[[A]IO[A], CustomerId, Customer]] = Bind(
// source = Allocate(
-// resource = cats.effect.kernel.Resource$$$Lambda$10909/0x0000000102d99840@1b2c90b
+// resource = cats.effect.kernel.Resource$$$Lambda$10773/0x0000000102f5b040@1927f3d2
// ),
-// fs = cats.effect.kernel.Resource$$Lambda$11037/0x0000000102e9d840@1917affb
+// fs = cats.effect.kernel.Resource$$Lambda$10901/0x00000001030a2840@b8fea9a
// )
We can now write our typed customer records successfully!
import cats.effect.unsafe.implicits.global
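The write itself is elided from this hunk. A sketch of the shape it takes, assuming ProducerApi exposes a sendSync method and reusing recordsToBeWritten from the earlier sketch:
import cats.syntax.all._

avro4sProducer
  .use(p => recordsToBeWritten.traverse_(r => p.sendSync(r)))
  .unsafeRunSync()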
@@ -259,11 +259,11 @@ // consumer: Resource[IO, ConsumerApi[IO, CustomerId, Customer]] = Bind(
// source = Bind(
// source = Allocate(
-// resource = cats.effect.kernel.Resource$$$Lambda$10909/0x0000000102d99840@c4af167
+// resource = cats.effect.kernel.Resource$$$Lambda$10773/0x0000000102f5b040@6501bc5c
// ),
-// fs = com.banno.kafka.consumer.ConsumerApi$Avro$$$Lambda$11040/0x0000000102e9f840@5b237aa1
+// fs = com.banno.kafka.consumer.ConsumerApi$Avro$$$Lambda$10904/0x00000001030a4840@13b9909f
// ),
-// fs = cats.effect.kernel.Resource$$Lambda$11037/0x0000000102e9d840@35618a11
+// fs = cats.effect.kernel.Resource$$Lambda$10901/0x00000001030a2840@6ef6832e
// )
With our Kafka consumer in hand, we'll assign our topic partition to the consumer with no offsets, so that it starts reading from the first record, and then read a stream of records from our Kafka topic:
import org.apache.kafka.common.TopicPartition
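The assignment and read are likewise elided here. A sketch under the assumption that ConsumerApi exposes assign and recordStream methods, reusing the hypothetical topicName from above:
import cats.syntax.all._
import scala.concurrent.duration._

val readRecords = consumer
  .use(c =>
    c.assign(topicName, Map.empty[TopicPartition, Long]) *>
      c.recordStream(1.second).take(2).compile.toList
  )
  .unsafeRunSync()
// readRecords: a List of ConsumerRecords carrying CustomerId keys and Customer values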