From 9ba7f42be880fb06895894ceff1a3f605b1e3f87 Mon Sep 17 00:00:00 2001
From: Denis Novac
Date: Wed, 6 Mar 2024 20:56:08 +0400
Subject: [PATCH] [IHP-24] Recognition docker container (#26)

---
 .github/workflows/publish-recognizer.yml | 63 ++++++++++
 .github/workflows/publish-resizer.yml | 1 +
 .gitignore | 1 +
 README.md | 29 +++--
 alias.sbt | 1 +
 docker-compose.yml | 109 +++++++++++++-----
 recognizer/Dockerfile | 10 ++
 recognizer/build_local.sh | 11 ++
 recognizer/convert.py | 18 +++
 recognizer/download-model.sh | 2 +-
 .../src/main/resources/application.conf | 10 +-
 .../imagehosting/recognizer/AppConfig.scala | 3 +-
 .../imagehosting/recognizer/Main.scala | 16 ++-
 .../recognizer/NsfwDetection.scala | 76 +++++++-----
 .../recognizer/ObjectDetection.scala | 8 +-
 resizer/Dockerfile | 4 +-
 16 files changed, 272 insertions(+), 90 deletions(-)
 create mode 100644 .github/workflows/publish-recognizer.yml
 create mode 100644 recognizer/Dockerfile
 create mode 100755 recognizer/build_local.sh
 create mode 100644 recognizer/convert.py

diff --git a/.github/workflows/publish-recognizer.yml b/.github/workflows/publish-recognizer.yml
new file mode 100644
index 0000000..17acd9d
--- /dev/null
+++ b/.github/workflows/publish-recognizer.yml
@@ -0,0 +1,63 @@
+name: publish recognizer
+
+on:
+  push:
+    branches: [ master ]
+    paths:
+      - 'recognizer/**'
+      - 'domain/**'
+      - 'common/**'
+      - 'project/**'
+      - 'build.sbt'
+      - '.github/**'
+
+env:
+  IMAGE_NAME: image-hosting-processing-recognizer
+
+jobs:
+  publish-container:
+    runs-on: ubuntu-latest
+
+    permissions:
+      packages: write
+      contents: read
+
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Log in to registry
+        run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u $ --password-stdin
+
+      - name: set up JDK 17
+        uses: actions/setup-java@v1
+        with:
+          java-version: 17
+
+      #- name: run tests
+      #  run: sbt test
+
+      #- name: run integration tests
+      #  run: sbt it:test
+
+      - name: Assembly
+        run: sbt buildRecognizer
+
+      - name: Build image
+        run: docker build ./recognizer --tag $IMAGE_NAME --label "runnumber=${GITHUB_RUN_ID}"
+
+      - name: Push image
+        run: |
+
+          IMAGE_ID=ghcr.io/${{ github.repository_owner }}/$IMAGE_NAME
+
+          # Change all uppercase to lowercase
+          IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]')
+
+          # make the version equal to the branch name (in case we want to push the container from several branches)
+          VERSION=$GITHUB_REF_NAME
+
+          echo IMAGE_ID=$IMAGE_ID
+          echo VERSION=$VERSION
+
+          docker tag $IMAGE_NAME $IMAGE_ID:$VERSION
+          docker push $IMAGE_ID:$VERSION
\ No newline at end of file
diff --git a/.github/workflows/publish-resizer.yml b/.github/workflows/publish-resizer.yml
index 75611d8..ccf32f3 100644
--- a/.github/workflows/publish-resizer.yml
+++ b/.github/workflows/publish-resizer.yml
@@ -9,6 +9,7 @@ on:
       - 'common/**'
       - 'project/**'
       - 'build.sbt'
+      - '.github/**'
 
 env:
   IMAGE_NAME: image-hosting-processing-resizer
diff --git a/.gitignore b/.gitignore
index fd649b5..150ed1f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@ params.conf
 .idea
 .bsp
 converted-to-torchscript.pt
+nsfw_model.pt
 
 target/
 !.mvn/wrapper/maven-wrapper.jar
diff --git a/README.md b/README.md
index c8d076a..827f9a4 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,8 @@
 
 Support part of https://github.com/Baklanov-Soft/image-hosting-storage
 
+See docker-compose.yml for an example configuration.
+
 ## Resizer
 
 Resizer service for generating the previews. Docker Compose contains 2 instances by default (=partitions amount of
@@ -10,10 +12,10 @@ new images topic).
 Environment variables:
 
 ```
-KAFKA_BOOTSTRAP_SERVERS - kafka cluster url (Default: localhost:9092)
-CONSUMER_GROUP_ID - consumer id, multiple instances with same id will allow horizontal scaling (depends on topic paritions) (Default: resizer-local-test)
-NEW_IMAGES_TOPIC - topic for notifications about new images (Default: "new-images.v1")
-MINIO_HOST - host of minio from where it will take pictures and where it is going to upload the previews
+KAFKA_BOOTSTRAP_SERVERS - kafka cluster url
+CONSUMER_GROUP_ID - consumer id, multiple instances with the same id allow horizontal scaling (depends on topic partitions)
+NEW_IMAGES_TOPIC - topic for notifications about new images
+MINIO_HOST - minio from which it takes pictures and to which it uploads the previews
 MINIO_USER
 MINIO_PASSWORD
 ```
@@ -36,25 +38,30 @@ It creates multiple preview images inside the same Minio as it reads from (insid
 ## Recognizer
 
 Service for object detection and nsfw content detection. NSFW detection based on model: https://huggingface.co/Falconsai/nsfw_image_detection
+Currently NSFW detection only recognizes pornographic images; it does not detect gore or other sensitive content.
 
 Converted to DJL TorchScript model (required for service to
-work): https://huggingface.co/DenisNovac/nsfw_image_detection/tree/main
+work; you will need to mount it into the container, see docker-compose for
+reference): https://huggingface.co/DenisNovac/nsfw_image_detection/tree/main
 
 Environment variables:
 
 ```
-KAFKA_BOOTSTRAP_SERVERS - kafka cluster url (Default: localhost:9092)
-CONSUMER_GROUP_ID - consumer id, multiple instances with same id will allow horizontal scaling (depends on topic paritions) (Default: recognizer-local-test)
-NEW_IMAGES_TOPIC - topic for notifications about new images (Default: "new-images.v1")
-CATEGORIES_TOPIC - topic for output of service (Default: "categories.v1")
+KAFKA_BOOTSTRAP_SERVERS - kafka cluster url
+CONSUMER_GROUP_ID - consumer id, multiple instances with the same id allow horizontal scaling (depends on topic partitions)
+NEW_IMAGES_TOPIC - topic for notifications about new images
+CATEGORIES_TOPIC - topic for the service output
 DEBUG_CATEGORIES - write debug object detection pictures (draw squares around detected objects) into debug folder (HEAVY PNG)
-NSFW_SYNSET - synset.txt file for nsfw detector (list of categories, included in project)
+NSFW_SYNSET_PATH - synset.txt file for the nsfw detector (list of categories, included in the project)
 NSFW_MODEL_PATH - pre-trained model for nsfw detection, requires one specific model, others could be working wrong
-MINIO_HOST - host of minio from where it will take pictures
+ENABLE_NSFW_DETECTION - allows disabling nsfw detection completely (and skipping its init)
+MINIO_HOST - minio from which it takes pictures (and where it saves debug pictures)
 MINIO_USER
 MINIO_PASSWORD
 ```
 
+**NOTE:** the nsfw model and synset must be in a subfolder such as /nsfw (see docker-compose for reference).
+ ### Protocol Recognizer reads `{NEW_IMAGES_TOPIC}` Kafka topic and accepts messages in following format (v1): diff --git a/alias.sbt b/alias.sbt index 482760b..a068018 100644 --- a/alias.sbt +++ b/alias.sbt @@ -1 +1,2 @@ addCommandAlias("buildResizer", "project resizer;assembly;") +addCommandAlias("buildRecognizer", "project recognizer;assembly;") diff --git a/docker-compose.yml b/docker-compose.yml index 6849b7f..21df733 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,24 +2,11 @@ version: "3" services: - # resizer: - # build: ./resizer - # container_name: resizer - # depends_on: - # - kafka-init - # - minio - # environment: - # KAFKA_BOOTSTRAP_SERVERS: kafka:9092 - # CONSUMER_GROUP_ID: resizer-local-test - # MINIO_HOST: "http://minio:9000" - # MINIO_USER: minioadmin - # MINIO_PASSWORD: minioadmin - resizer1: image: ghcr.io/baklanov-soft/image-hosting-processing-resizer:master container_name: resizer1 depends_on: - - kafka-init + - kafka-init-new-images - minio environment: KAFKA_BOOTSTRAP_SERVERS: kafka:9092 @@ -27,32 +14,88 @@ services: MINIO_HOST: "http://minio:9000" MINIO_USER: minioadmin MINIO_PASSWORD: minioadmin + NEW_IMAGES_TOPIC: "new-images.v1" resizer2: image: ghcr.io/baklanov-soft/image-hosting-processing-resizer:master container_name: resizer2 depends_on: - - kafka-init + - kafka-init-new-images + - minio environment: KAFKA_BOOTSTRAP_SERVERS: kafka:9092 CONSUMER_GROUP_ID: resizer-local-test MINIO_HOST: "http://minio:9000" MINIO_USER: minioadmin MINIO_PASSWORD: minioadmin + NEW_IMAGES_TOPIC: "new-images.v1" + + recognizer1: + image: ghcr.io/baklanov-soft/image-hosting-processing-recognizer:master + container_name: recognizer1 + depends_on: + - kafka-init-new-images + - kafka-init-categories + - minio + volumes: + - recognizer1-djl-cache:/root/.djl.ai + - "./recognizer/synset.txt:/opt/app/nsfw/synset.txt" + # download it from here https://huggingface.co/DenisNovac/nsfw_image_detection/ + - "./recognizer/nsfw_model.pt:/opt/app/nsfw/nsfw_model.pt" + environment: + KAFKA_BOOTSTRAP_SERVERS: kafka:9092 + CONSUMER_GROUP_ID: recognizer-local-test + NEW_IMAGES_TOPIC: "new-images.v1" + CATEGORIES_TOPIC: "categories.v1" + ENABLE_NSFW_DETECTION: true + NSFW_SYNSET_PATH: "nsfw/synset.txt" + NSFW_MODEL_PATH: "nsfw/nsfw_model.pt" + MINIO_HOST: "http://minio:9000" + MINIO_USER: minioadmin + MINIO_PASSWORD: minioadmin + DEBUG_CATEGORIES: true + + recognizer2: + image: ghcr.io/baklanov-soft/image-hosting-processing-recognizer:master + container_name: recognizer2 + depends_on: + - kafka-init-new-images + - kafka-init-categories + - minio + volumes: + - recognizer2-djl-cache:/root/.djl.ai + - "./recognizer/synset.txt:/opt/app/nsfw/synset.txt" + # download it from here https://huggingface.co/DenisNovac/nsfw_image_detection/ + - "./recognizer/nsfw_model.pt:/opt/app/nsfw/nsfw_model.pt" + environment: + KAFKA_BOOTSTRAP_SERVERS: kafka:9092 + CONSUMER_GROUP_ID: recognizer-local-test + NEW_IMAGES_TOPIC: "new-images.v1" + CATEGORIES_TOPIC: "categories.v1" + ENABLE_NSFW_DETECTION: true + NSFW_SYNSET_PATH: "nsfw/synset.txt" + NSFW_MODEL_PATH: "nsfw/nsfw_model.pt" + MINIO_HOST: "http://minio:9000" + MINIO_USER: minioadmin + MINIO_PASSWORD: minioadmin + DEBUG_CATEGORIES: true kafka: container_name: kafka image: bitnami/kafka:3.6.1 ports: - - "9092:9092" + - "9094:9094" environment: + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT + KAFKA_CFG_LISTENERS: INTERNAL://kafka:9092,CONTROLLER://kafka:9093,EXTERNAL://:9094 + 
KAFKA_CFG_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,EXTERNAL://localhost:9094 + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: INTERNAL + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER + # cluster config + KAFKA_KRAFT_CLUSTER_ID: LelM2dIFQkiUFvXCEcqRWA KAFKA_CFG_NODE_ID: 0 KAFKA_CFG_PROCESS_ROLES: controller,broker - KAFKA_CFG_LISTENERS: PLAINTEXT://kafka:9092,CONTROLLER://kafka:9093 - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: 0@kafka:9093 - KAFKA_CFG_CONTROLLER_LISTENER_NAMES: CONTROLLER - KAFKA_KRAFT_CLUSTER_ID: LelM2dIFQkiUFvXCEcqRWA minio: container_name: minio @@ -138,19 +181,21 @@ services: "--topic", "categories.v1" ] - # kafka-ui: - # image: provectuslabs/kafka-ui - # container_name: kafka-ui - # ports: - # - "8000:8000" - # environment: - # SERVER_PORT: 8000 - # KAFKA_CLUSTERS_0_NAME: image-hosting - # KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 - # KAFKA_CLUSTERS_0_READONLY: true - # depends_on: - # - kafka + kafka-ui: + image: provectuslabs/kafka-ui + container_name: kafka-ui + ports: + - "8000:8000" + environment: + SERVER_PORT: 8000 + KAFKA_CLUSTERS_0_NAME: image-hosting + KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092 + KAFKA_CLUSTERS_0_READONLY: true + depends_on: + - kafka volumes: minio-data: db-data: + recognizer1-djl-cache: + recognizer2-djl-cache: diff --git a/recognizer/Dockerfile b/recognizer/Dockerfile new file mode 100644 index 0000000..44c6d18 --- /dev/null +++ b/recognizer/Dockerfile @@ -0,0 +1,10 @@ +FROM eclipse-temurin:17-jre-jammy + +WORKDIR /opt/app + +COPY ./target/scala-2.13/image-hosting-processing-recognizer-assembly-0.1.0-SNAPSHOT.jar ./app.jar + +# subfolder to mount nsfw model and synset here +RUN mkdir /opt/app/nsfw + +ENTRYPOINT ["java", "-cp", "app.jar", "com.github.baklanovsoft.imagehosting.recognizer.Main"] diff --git a/recognizer/build_local.sh b/recognizer/build_local.sh new file mode 100755 index 0000000..9867fa3 --- /dev/null +++ b/recognizer/build_local.sh @@ -0,0 +1,11 @@ +#docker buildx build --platform linux/amd64 -t test/recognizer . + +cd .. + +sbt buildRecognizer + +cd recognizer + +docker build -t test/recognizer . 
+ +docker image ls | grep test/recognizer diff --git a/recognizer/convert.py b/recognizer/convert.py new file mode 100644 index 0000000..c521dee --- /dev/null +++ b/recognizer/convert.py @@ -0,0 +1,18 @@ +from transformers import AutoImageProcessor, AutoModelForImageClassification +import torch +from PIL import Image +from transformers import AutoTokenizer + +model_name = "DenisNovac/nsfw_image_detection" + +model = AutoModelForImageClassification.from_pretrained(model_name, torchscript=True, return_dict=False) + +processor = AutoImageProcessor.from_pretrained(model_name) + +image = Image.open("images/hentai.jpg") +image_inputs = processor(images=image, return_tensors="pt") + +config = {'forward': [image_inputs['pixel_values']]} +converted = torch.jit.trace_module(model, config) + +torch.jit.save(converted, "nsfw_model.pt") diff --git a/recognizer/download-model.sh b/recognizer/download-model.sh index 371563f..264a6b8 100755 --- a/recognizer/download-model.sh +++ b/recognizer/download-model.sh @@ -1,3 +1,3 @@ # https://huggingface.co/DenisNovac/nsfw_image_detection # fork of https://huggingface.co/Falconsai/nsfw_image_detection -wget -O converted-to-torchscript.pt https://huggingface.co/DenisNovac/nsfw_image_detection/resolve/main/converted-to-torchscript.pt?download=true +wget -O nsfw_model.pt https://huggingface.co/DenisNovac/nsfw_image_detection/resolve/main/converted-to-torchscript.pt?download=true diff --git a/recognizer/src/main/resources/application.conf b/recognizer/src/main/resources/application.conf index a7421e7..29c0647 100644 --- a/recognizer/src/main/resources/application.conf +++ b/recognizer/src/main/resources/application.conf @@ -1,6 +1,6 @@ include "params.conf" -kafka-bootstrap-servers = "localhost:9092" +kafka-bootstrap-servers = "localhost:9094" kafka-bootstrap-servers = ${?KAFKA_BOOTSTRAP_SERVERS} consumer-group-id = "recognizer-local-test" @@ -16,9 +16,11 @@ new-images-topic = ${?NEW_IMAGES_TOPIC} debug-categories = false debug-categories = ${?DEBUG_CATEGORIES} -nsfw-synset = "synset.txt" -nsfw-synset = ${?NSFW_SYNSET} -nsfw-model-path = "recognizer/converted-to-torchscript.pt" +enable-nsfw-detection = true +enable-nsfw-detection = ${?ENABLE_NSFW_DETECTION} +nsfw-synset-path = "recognizer/synset.txt" +nsfw-synset-path = ${?NSFW_SYNSET_PATH} +nsfw-model-path = "recognizer/nsfw_model.pt" nsfw-model-path = ${?NSFW_MODEL_PATH} minio { diff --git a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/AppConfig.scala b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/AppConfig.scala index f501b85..5e2cd5a 100644 --- a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/AppConfig.scala +++ b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/AppConfig.scala @@ -9,7 +9,8 @@ final case class AppConfig( newImagesTopic: String, categoriesTopic: String, debugCategories: Boolean, - nsfwSynset: String, + enableNsfwDetection: Boolean, + nsfwSynsetPath: String, nsfwModelPath: String, minio: MinioCreds ) diff --git a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/Main.scala b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/Main.scala index 07065ed..e9518c0 100644 --- a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/Main.scala +++ b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/Main.scala @@ -1,5 +1,6 @@ package com.github.baklanovsoft.imagehosting.recognizer +import 
cats.syntax.applicative._ import cats.effect._ import com.github.baklanovsoft.imagehosting.NewImage import com.github.baklanovsoft.imagehosting.kafka.{KafkaConsumer, KafkaJsonDeserializer} @@ -30,9 +31,18 @@ object Main extends IOApp with KafkaJsonDeserializer { ) resources = for { - detection <- if (config.debugCategories) ObjectDetection.debug[IO](minioClient) - else ObjectDetection.production[IO] - nsfw <- NsfwDetection.of[IO](config.nsfwModelPath, config.nsfwSynset) + detection <- if (config.debugCategories) ObjectDetection.debug[IO](minioClient) + else ObjectDetection.production[IO] + + nsfw <- { + if (config.enableNsfwDetection) NsfwDetection.of[IO](config.nsfwModelPath, config.nsfwSynsetPath) + else + Resource.eval { + logger.warn("NSFW Detection is disabled") *> + NsfwDetection.dummy[IO].pure[IO] + } + } + categorization <- Resource.eval( CategorizationStream .of[IO]( diff --git a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/NsfwDetection.scala b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/NsfwDetection.scala index 4dec4a1..e72e76a 100644 --- a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/NsfwDetection.scala +++ b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/NsfwDetection.scala @@ -7,12 +7,13 @@ import ai.djl.modality.cv.transform.{CenterCrop, Normalize, Resize, ToTensor} import ai.djl.modality.cv.translator.ImageClassificationTranslator import ai.djl.repository.zoo.Criteria import ai.djl.translate.Translator +import cats.Monad import cats.effect.kernel.{Resource, Sync} import cats.implicits._ import com.github.baklanovsoft.imagehosting.{BucketId, Category, ImageId, Score} -import org.typelevel.log4cats.LoggerFactory +import org.typelevel.log4cats.{Logger, LoggerFactory} -import java.nio.file.Paths +import java.nio.file.{Files, Paths} import scala.jdk.CollectionConverters._ trait NsfwDetection[F[_]] { @@ -24,37 +25,49 @@ trait NsfwDetection[F[_]] { object NsfwDetection { - private def buildTranslator[F[_]: Sync](synsetPath: String): F[Translator[Image, Classifications]] = Sync[F].delay { - // copypasted from here https://github.com/deepjavalibrary/djl/issues/1419 - ImageClassificationTranslator - .builder() - .optSynsetArtifactName(synsetPath) - .addTransform(new Resize(256)) - // from the model description it was trained on 224x224 images so looks like it fits - .addTransform(new CenterCrop(224, 224)) - .addTransform(new ToTensor()) - .addTransform( - new Normalize( - Array( - 0.485f, - 0.456f, - 0.406f - ), - Array( - 0.229f, - 0.224f, - 0.225f + def dummy[F[_]: Monad]: NsfwDetection[F] = new NsfwDetection[F] { + override def detect(image: Image, bucketId: BucketId, imageId: ImageId): F[Option[(Category, Score)]] = + Monad[F].pure(None) + } + + private def buildTranslator[F[_]: Sync](synsetUrl: String): F[Translator[Image, Classifications]] = + Sync[F].delay { + // copypasted from here https://github.com/deepjavalibrary/djl/issues/1419 + ImageClassificationTranslator + .builder() + .optSynsetUrl(synsetUrl) + .addTransform(new Resize(256)) + // from the model description it was trained on 224x224 images so looks like it fits + .addTransform(new CenterCrop(224, 224)) + .addTransform(new ToTensor()) + .addTransform( + new Normalize( + Array( + 0.485f, + 0.456f, + 0.406f + ), + Array( + 0.229f, + 0.224f, + 0.225f + ) ) ) - ) - .optApplySoftmax(true) - .build() - } + .optApplySoftmax(true) + .build() + } - private def acquireModelPredictor[F[_]: Sync](modelPath: String, 
synsetPath: String) = + private def acquireModelPredictor[F[_]: Sync: Logger](modelPath: String, synsetPath: String) = Resource.make { for { - translator <- buildTranslator(synsetPath) + lookup <- Sync[F].delay(Files.list(Paths.get("./")).toArray.toList) + _ <- Logger[F].info(s"Workdir absolute path: ${Paths.get("./").toAbsolutePath.toString}") + _ <- Logger[F].info(s"Lookup result: $lookup") + synsetUrl <- Sync[F].delay("file://" + Paths.get(synsetPath).toAbsolutePath.toString) + _ <- Logger[F].info(s"Synset constructed url: $synsetUrl") + + translator <- buildTranslator(synsetUrl) criteria <- Sync[F].delay { Criteria .builder() @@ -70,15 +83,16 @@ object NsfwDetection { .build() } - model <- Sync[F].delay(criteria.loadModel()) + model <- Sync[F].delay(criteria.loadModel()) + predictor <- Sync[F].delay(model.newPredictor()) } yield (model, predictor) } { case (model, predictor) => Sync[F].delay(predictor.close()) >> Sync[F].delay(model.close()) } def of[F[_]: Sync: LoggerFactory](modelPath: String, synsetPath: String): Resource[F, NsfwDetection[F]] = for { - logger <- Resource.eval(LoggerFactory[F].create) - (_, predictor) <- acquireModelPredictor[F](modelPath, synsetPath) + implicit0(logger: Logger[F]) <- Resource.eval(LoggerFactory[F].create) + (_, predictor) <- acquireModelPredictor[F](modelPath, synsetPath) } yield new NsfwDetection[F] { override def detect(image: Image, bucketId: BucketId, imageId: ImageId): F[Option[(Category, Score)]] = diff --git a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/ObjectDetection.scala b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/ObjectDetection.scala index 17596b9..d35d8ce 100644 --- a/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/ObjectDetection.scala +++ b/recognizer/src/main/scala/com/github/baklanovsoft/imagehosting/recognizer/ObjectDetection.scala @@ -1,14 +1,13 @@ package com.github.baklanovsoft.imagehosting.recognizer -import cats.implicits._ import ai.djl.Application import ai.djl.engine.Engine import ai.djl.inference.Predictor import ai.djl.modality.cv.Image import ai.djl.modality.cv.output.DetectedObjects import ai.djl.repository.zoo.{Criteria, ZooModel} -import ai.djl.training.util.ProgressBar import cats.effect.kernel.{Resource, Sync} +import cats.implicits._ import com.github.baklanovsoft.imagehosting.s3.MinioClient import com.github.baklanovsoft.imagehosting.{BucketId, Category, ImageId, Score} import org.typelevel.log4cats.LoggerFactory @@ -50,7 +49,7 @@ object ObjectDetection { private def acquireModelPredictor[F[_]: Sync] : Resource[F, (ZooModel[Image, DetectedObjects], Predictor[Image, DetectedObjects])] = Resource.make { - val useEngine = Engines.MxNet + val useEngine = Engines.PyTorch for { criteria <- Sync[F].delay( @@ -58,8 +57,7 @@ object ObjectDetection { .optApplication(Application.CV.OBJECT_DETECTION) .setTypes(classOf[Image], classOf[DetectedObjects]) .optEngine(Engine.getEngine(useEngine.name).getEngineName) - .optFilter("backbone", useEngine.Models.vgg16) - .optProgress(new ProgressBar) + .optFilter("backbone", useEngine.Models.resnet50) .build ) diff --git a/resizer/Dockerfile b/resizer/Dockerfile index 1e54271..f99c570 100644 --- a/resizer/Dockerfile +++ b/resizer/Dockerfile @@ -2,6 +2,6 @@ FROM eclipse-temurin:17.0.6_10-jre-jammy WORKDIR /opt/app -COPY ./target/scala-2.13/image-hosting-processing-resizer-assembly-0.1.0-SNAPSHOT.jar ./ +COPY ./target/scala-2.13/image-hosting-processing-resizer-assembly-0.1.0-SNAPSHOT.jar ./app.jar 
-ENTRYPOINT ["java", "-cp", "image-hosting-processing-resizer-assembly-0.1.0-SNAPSHOT.jar", "com.github.baklanovsoft.imagehosting.resizer.Main"] +ENTRYPOINT ["java", "-cp", "app.jar", "com.github.baklanovsoft.imagehosting.resizer.Main"]