From a2e70050a34475d80f64a4b86a70b206b266cc4b Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 15:35:17 +0400 Subject: [PATCH 01/13] bump version, suggest keras3 --- DESCRIPTION | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index ea264ca..40f2390 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: kerastuneR Type: Package Title: Interface to 'Keras Tuner' -Version: 0.1.0.6 +Version: 0.1.0.7 Authors@R: c( person("Turgut", "Abdullayev", role = c("aut", "cre"), email = "turqut.a.314@gmail.com"), @@ -35,7 +35,7 @@ Imports: crayon, magick Suggests: - keras, + keras3, knitr, tfdatasets, testthat, From f6c94828d880d17c0f3c7b1213a01709255b8228 Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 15:35:30 +0400 Subject: [PATCH 02/13] update tf version --- .github/workflows/keras_tuner_stable.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/keras_tuner_stable.yml b/.github/workflows/keras_tuner_stable.yml index 20a27cc..085fad4 100644 --- a/.github/workflows/keras_tuner_stable.yml +++ b/.github/workflows/keras_tuner_stable.yml @@ -12,10 +12,10 @@ jobs: fail-fast: false matrix: config: - - { os: macOS-latest, tf: '2.13.0', keras-tuner: '1.3.5'} - - { os: windows-latest, tf: '2.13.0', keras-tuner: '1.3.5'} - - { os: windows-latest, tf: 'nightly', keras-tuner: '1.3.5'} - - { os: macOS-latest, tf: 'nightly', keras-tuner: '1.3.5'} + - { os: macOS-latest, tf: '2.16.1', keras-tuner: '1.4.7'} + - { os: windows-latest, tf: '2.16.1', keras-tuner: '1.4.7'} + - { os: windows-latest, tf: 'nightly', keras-tuner: '1.4.7'} + - { os: macOS-latest, tf: 'nightly', keras-tuner: '1.4.7'} env: R_REMOTES_NO_ERRORS_FROM_WARNINGS: true TF_VERSION: ${{ matrix.config.tf }} From eb393c6befd2fececcc40263c9829c4af42940d8 Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 15:36:03 +0400 Subject: [PATCH 03/13] the default version of keras-tuner is 1.4.7 --- R/install.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/install.R b/R/install.R index c9d3ce1..4b31abf 100644 --- a/R/install.R +++ b/R/install.R @@ -21,7 +21,7 @@ install_kerastuner <- function(version = NULL, ..., bayesian = TRUE, restart_ses if (is.null(version) & !from_git) { - module_string <- paste0("keras-tuner==", '1.3.5') + module_string <- paste0("keras-tuner==", '1.4.7') } else if (!is.null(version)) { module_string <- paste0("keras-tuner==", version) } else if (isTRUE(from_git)) { From 352b068f3c07c47444a480d5d966556c8535193c Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 15:36:17 +0400 Subject: [PATCH 04/13] update tests --- tests/testthat.R | 2 +- tests/testthat/test-BayesOptim.R | 3 +- tests/testthat/test-MNIST-conv.R | 67 +++++++++++++++---------------- tests/testthat/test-build.R | 3 +- tests/testthat/test-hp-space.R | 4 +- tests/testthat/test-hyper_class.R | 1 - 6 files changed, 36 insertions(+), 44 deletions(-) diff --git a/tests/testthat.R b/tests/testthat.R index b61ac4d..7066df2 100644 --- a/tests/testthat.R +++ b/tests/testthat.R @@ -2,7 +2,7 @@ library(testthat) library(tensorflow) library(kerastuneR) -library(keras) +library(keras3) test_check("kerastuneR") diff --git a/tests/testthat/test-BayesOptim.R b/tests/testthat/test-BayesOptim.R index dc4e674..4615987 100644 --- a/tests/testthat/test-BayesOptim.R +++ b/tests/testthat/test-BayesOptim.R @@ -4,7 +4,6 @@ source("utils.R") test_succeeds("Can run Bayesian Optimization", { - library(keras) library(tensorflow) library(dplyr) 
   library(tfdatasets)
@@ -118,7 +117,7 @@ test_succeeds("Can run Bayesian Optimization", {
 
   mnist_train$x = tf$cast(mnist_train$x, 'float32') / 255
 
-  mnist_train$x = k_reshape(mnist_train$x,shape = c(6e4,28,28,1))
+  mnist_train$x = tf$reshape(mnist_train$x,shape = c(6e4L,28L,28L,1L))
   mnist_train$y = tf$dtypes$cast(mnist_train$y, 'float32')
 
   if (!Sys.info()[1] %in% 'Windows') {
diff --git a/tests/testthat/test-MNIST-conv.R b/tests/testthat/test-MNIST-conv.R
index 270f24e..a41dfdf 100644
--- a/tests/testthat/test-MNIST-conv.R
+++ b/tests/testthat/test-MNIST-conv.R
@@ -3,44 +3,50 @@ context("build(hp) - MNIST")
 source("utils.R")
 
 test_succeeds("Can run hyper_class", {
-  library(keras)
   library(dplyr)
   library(tfdatasets)
   library(kerastuneR)
 
-  conv_build_model = function(hp) {
-    'Builds a convolutional model.'
-    inputs = tf$keras$Input(shape=list(28L, 28L, 1L))
+  conv_build_model <- function(hp) {
+    inputs <- tf$keras$Input(shape = list(28L, 28L, 1L))
+    x <- inputs
 
-    x = inputs
-
-    for (i in 1:hp$Int('conv_layers', 1L, 3L, default=3L)) {
-      x = tf$keras$layers$Conv2D(filters = hp$Int(paste('filters_', i, sep = ''), 4L, 32L, step=4L, default=8L),
-                                 kernel_size = hp$Int(paste('kernel_size_', i, sep = ''), 3L, 5L),
-                                 activation ='relu',
-                                 padding='same')(x)
-      if (hp$Choice(paste('pooling', i, sep = ''), c('max', 'avg')) == 'max') {
-        x = tf$keras$layers$MaxPooling2D()(x)
+    for (i in 1:hp$Int('conv_layers', 1L, 3L, default = 3L)) {
+      x <- tf$keras$layers$Conv2D(
+        filters = hp$Int(paste('filters_', i, sep = ''), 4L, 32L, step = 4L, default = 8L),
+        kernel_size = hp$Int(paste('kernel_size_', i, sep = ''), 3L, 5L),
+        activation = 'relu',
+        padding = 'same'
+      )(x)
+
+      pool_type <- hp$Choice(paste('pooling', i, sep = ''), c('max', 'avg'))
+      if (pool_type == 'max') {
+        x <- tf$keras$layers$MaxPooling2D(pool_size = c(2L, 2L))(x)  # explicit 2x2 pooling window
       } else {
-        x = tf$keras$layers$AveragePooling2D()(x)
+        x <- tf$keras$layers$AveragePooling2D(pool_size = c(2L, 2L))(x)  # explicit 2x2 pooling window
       }
-      x = tf$keras$layers$BatchNormalization()(x)
-      x = tf$keras$layers$ReLU()(x)
+      x <- tf$keras$layers$BatchNormalization()(x)
+      x <- tf$keras$layers$ReLU()(x)
     }
-    if (hp$Choice('global_pooling', c('max', 'avg')) == 'max') {
-      x = tf$keras$layers$GlobalMaxPool2D()(x)
+
+    global_pooling_type <- hp$Choice('global_pooling', c('max', 'avg'))
+    if (global_pooling_type == 'max') {
+      x <- tf$keras$layers$GlobalMaxPool2D()(x)
     } else {
-      x = tf$keras$layers$GlobalAveragePooling2D()(x)
+      x <- tf$keras$layers$GlobalAveragePooling2D()(x)
     }
 
-    outputs = tf$keras$layers$Dense(10L, activation='softmax')(x)
-    model = tf$keras$Model(inputs, outputs)
-    optimizer = hp$Choice('optimizer', c('adam', 'sgd'))
-    model %>% compile(optimizer, loss='sparse_categorical_crossentropy', metrics='accuracy')
+    outputs <- tf$keras$layers$Dense(10L, activation = 'softmax')(x)
+    model <- tf$keras$Model(inputs, outputs)
+
+    optimizer <- hp$Choice('optimizer', c('adam', 'sgd'))
+    model %>% compile(optimizer, loss = 'sparse_categorical_crossentropy', metrics = 'accuracy')
+    return(model)
   }
+
   testthat::expect_length(class(Hyperband(
     hypermodel = conv_build_model,
     objective='val_accuracy',
     max_epochs = 1,
     factor = 2,
     hyperband_iterations = 1,
     directory = 'results_dir',
     project_name = 'mnist')),5)
 
-  testthat::expect_match(Hyperband(
-    hypermodel = conv_build_model,
-    objective = 'val_accuracy',
-    max_epochs = 1,
-    factor = 2,
-    hyperband_iterations = 1,
-    directory = 'results_dir',
-    project_name = 'mnist') %>% capture.output(), 'keras_tuner.tuners.hyperband.Hyperband')
 
   main = 
function() { tuner = Hyperband( @@ -74,8 +72,8 @@ test_succeeds("Can run hyper_class", { c(mnist_train, mnist_test) %<-% mnist_data rm(mnist_data) - mnist_train$x = k_reshape(mnist_train$x,shape = c(6e4,28,28,1)) - mnist_test$x = k_reshape(mnist_test$x,shape = c(1e4,28,28,1)) + mnist_train$x = tf$reshape(mnist_train$x,shape = c(6e4L,28L,28L,1L)) + mnist_test$x = tf$reshape(mnist_test$x,shape = c(1e4L,28L,28L,1L)) mnist_train = tensor_slices_dataset(list(tf$dtypes$cast(mnist_train$x, 'float32') / 255., mnist_train$y)) %>% dataset_shuffle(1e3) %>% dataset_batch(1e2) %>% dataset_repeat() @@ -88,8 +86,7 @@ test_succeeds("Can run hyper_class", { steps_per_epoch=600, validation_data=mnist_test, validation_steps=100, - epochs=1, - callbacks=c(tf$keras$callbacks$EarlyStopping('val_accuracy')) + epochs=1 ) } }) diff --git a/tests/testthat/test-build.R b/tests/testthat/test-build.R index 8ee1e6f..8db7552 100644 --- a/tests/testthat/test-build.R +++ b/tests/testthat/test-build.R @@ -3,7 +3,6 @@ context("build(hp)") source("utils.R") test_succeeds("Can run build(hp) and plot_tuner()", { - library(keras) library(tensorflow) library(dplyr) library(kerastuneR) @@ -75,7 +74,7 @@ test_succeeds("Can run build(hp) and plot_tuner()", { best_model = tuner2$hypermodel$build(best_hps) # Train the best model - best_model %>% fit(X_train, y_train, epochs=50, validation_split=0.2) + best_model %>% fit(X_train, y_train, epochs=5, validation_split=0.2) } }) diff --git a/tests/testthat/test-hp-space.R b/tests/testthat/test-hp-space.R index 85752dc..f1d363d 100644 --- a/tests/testthat/test-hp-space.R +++ b/tests/testthat/test-hp-space.R @@ -3,7 +3,7 @@ context('hp space') source("utils.R") test_succeeds("Can run hp-space", { - library(keras) + library(keras3) library(dplyr) library(kerastuneR) @@ -22,8 +22,6 @@ test_succeeds("Can run hp-space", { hp$Choice('learning_rate',values =c(1e-1, 1e-3)) hp$Int('num_layers', 2L, 20L) - testthat::expect_match(capture.output(hp),'keras_tuner.engine.hyperparameters.hyperparameters.HyperParameters') - mnist_model = function(hp) { diff --git a/tests/testthat/test-hyper_class.R b/tests/testthat/test-hyper_class.R index bdb6027..cc2548d 100644 --- a/tests/testthat/test-hyper_class.R +++ b/tests/testthat/test-hyper_class.R @@ -3,7 +3,6 @@ context("build(hp) - Hyperclass") source("utils.R") test_succeeds("Can run hyper_class", { - library(keras) library(tensorflow) library(dplyr) library(kerastuneR) From da6bc7cecc7bef25525ffd5b988f0d411b4b35ef Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 15:36:51 +0400 Subject: [PATCH 05/13] update cran comments --- cran-comments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cran-comments.md b/cran-comments.md index c56868b..8bdcfbb 100644 --- a/cran-comments.md +++ b/cran-comments.md @@ -4,7 +4,7 @@ This is a 4th submission of 'kerastuneR' 0.1.0.6 * Github actions: windows-latest * Github actions: macOS-latest -* Github actions: ubuntu-16.04 +* Github actions: ubuntu-20.04 ## R CMD check results From 23e8d79c8673dc1e3b111d3a0dade50f926a279a Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 16:12:46 +0400 Subject: [PATCH 06/13] update tests --- tests/testthat/test-hp-space.R | 4 ++-- tests/testthat/test-resnet.R | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/tests/testthat/test-hp-space.R b/tests/testthat/test-hp-space.R index f1d363d..d32907c 100644 --- a/tests/testthat/test-hp-space.R +++ b/tests/testthat/test-hp-space.R @@ -14,8 +14,8 @@ test_succeeds("Can run hp-space", { 
mnist_train$x = tf$dtypes$cast(mnist_train$x, 'float32') / 255. mnist_test$x = tf$dtypes$cast(mnist_test$x, 'float32') / 255. - mnist_train$x = k_reshape(mnist_train$x,shape = c(6e4,28,28)) - mnist_test$x = k_reshape(mnist_test$x,shape = c(1e4,28,28)) + mnist_train$x = tf$reshape(mnist_train$x,shape = c(6e4L,28L,28L)) + mnist_test$x = tf$reshape(mnist_test$x,shape = c(1e4L,28L,28L)) hp = HyperParameters() diff --git a/tests/testthat/test-resnet.R b/tests/testthat/test-resnet.R index 0db76ad..8b9e01b 100644 --- a/tests/testthat/test-resnet.R +++ b/tests/testthat/test-resnet.R @@ -3,7 +3,7 @@ context("build(hp) - ResNet") source("utils.R") test_succeeds("Can run hyper_class", { - library(keras) + library(dplyr) library(kerastuneR) @@ -12,7 +12,6 @@ test_succeeds("Can run hyper_class", { hypermodel = HyperResNet(input_shape = list(300L, 300L, 3L), classes = 10L) hypermodel2 = HyperXception(input_shape = list(300L, 300L, 3L), classes = 10L) - testthat::expect_match(hypermodel %>% capture.output(),'keras_tuner.applications.resnet.HyperResNet') tuner = Hyperband( hypermodel = hypermodel, @@ -21,7 +20,6 @@ test_succeeds("Can run hyper_class", { directory = 'my_dir', project_name='helloworld') - testthat::expect_match(tuner %>% capture.output(),'keras_tuner.tuners.hyperband.Hyperband') train_data = cifar$train$x[1:30,1:32,1:32,1:3] test_data = cifar$train$y[1:30,1] %>% as.matrix() From daac2f22bb7a473c7efe8efa710f89d4583c00f6 Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 16:24:23 +0400 Subject: [PATCH 07/13] fix integer issues --- R/random_search.R | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/R/random_search.R b/R/random_search.R index 35403eb..dc5c81d 100644 --- a/R/random_search.R +++ b/R/random_search.R @@ -78,7 +78,8 @@ RandomSearch = function(hypermodel, objective, max_trials, seed = NULL, if(is.null(hyperparameters)) args$hyperparameters <- NULL - + if(!is.null(args$executions_per_trial)) + args$executions_per_trial <- as.integer(args$executions_per_trial) do.call(kerastuner$tuners$RandomSearch, args) From 3b7c13f58c2c53dd35b1c01997de25469d2c55cf Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 16:24:36 +0400 Subject: [PATCH 08/13] switch to keras3 --- DESCRIPTION | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 40f2390..299dd28 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -20,7 +20,7 @@ BugReports: https://github.com/EagerAI/kerastuneR/issues/ SystemRequirements: TensorFlow >= 2.0 (https://www.tensorflow.org/) Encoding: UTF-8 LazyData: true -RoxygenNote: 7.2.3 +RoxygenNote: 7.3.1 Imports: reticulate, tensorflow, diff --git a/README.md b/README.md index e8447f2..c3798ab 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ y_data2 <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix() This function returns a compiled model. 
``` -library(keras) +library(keras3) library(tensorflow) library(kerastuneR) From 9806155bc0e101f12cac6f8f8bd7ab1414c355ef Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 16:51:34 +0400 Subject: [PATCH 09/13] fix test --- tests/testthat/test-hp-space.R | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/testthat/test-hp-space.R b/tests/testthat/test-hp-space.R index d32907c..d328bf5 100644 --- a/tests/testthat/test-hp-space.R +++ b/tests/testthat/test-hp-space.R @@ -53,7 +53,6 @@ test_succeeds("Can run hp-space", { validation_split=0.2, epochs=1) - testthat::expect_match(capture.output(tuner),'keras_tuner.tuners.randomsearch.RandomSearch') }) From 5d84db73855cf8e92714b260f4e58dcca8c16cd4 Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 17:07:44 +0400 Subject: [PATCH 10/13] remove files --- vignettes/BayesianOptimisation.R | 133 ------------------------------- vignettes/HyperModel_subclass.R | 96 ---------------------- vignettes/Introduction.R | 3 - vignettes/MNIST.R | 83 ------------------- vignettes/best_practice.R | 102 ------------------------ 5 files changed, 417 deletions(-) delete mode 100644 vignettes/BayesianOptimisation.R delete mode 100644 vignettes/HyperModel_subclass.R delete mode 100644 vignettes/Introduction.R delete mode 100644 vignettes/MNIST.R delete mode 100644 vignettes/best_practice.R diff --git a/vignettes/BayesianOptimisation.R b/vignettes/BayesianOptimisation.R deleted file mode 100644 index d758667..0000000 --- a/vignettes/BayesianOptimisation.R +++ /dev/null @@ -1,133 +0,0 @@ -## ----setup, include=FALSE----------------------------------------------------- -knitr::opts_chunk$set(echo = TRUE, eval = F) - -## ----------------------------------------------------------------------------- -# -# library(keras) -# library(tensorflow) -# library(dplyr) -# library(tfdatasets) -# library(kerastuneR) -# library(reticulate) -# -# -# conv_build_model = function(hp) { -# 'Builds a convolutional model.' 
-# inputs = tf$keras$Input(shape=c(28L, 28L, 1L)) -# -# x = inputs -# -# for (i in 1:hp$Int('conv_layers', 1L, 3L, default=3L)) { -# x = tf$keras$layers$Conv2D(filters = hp$Int(paste('filters_', i, sep = ''), 4L, 32L, step=4L, default=8L), -# kernel_size = hp$Int(paste('kernel_size_', i, sep = ''), 3L, 5L), -# activation ='relu', -# padding='same')(x) -# if (hp$Choice(paste('pooling', i, sep = ''), c('max', 'avg')) == 'max') { -# x = tf$keras$layers$MaxPooling2D()(x) -# } else { -# x = tf$keras$layers$AveragePooling2D()(x) -# } -# x = tf$keras$layers$BatchNormalization()(x) -# x = tf$keras$layers$ReLU()(x) -# -# } -# if (hp$Choice('global_pooling', c('max', 'avg')) == 'max') { -# x = tf$keras$layers$GlobalMaxPooling2D()(x) -# } else { -# x = tf$keras$layers$GlobalAveragePooling2D()(x) -# } -# -# outputs = tf$keras$layers$Dense(10L, activation='softmax')(x) -# model = tf$keras$Model(inputs, outputs) -# optimizer = hp$Choice('optimizer', c('adam', 'sgd')) -# model %>% compile(optimizer, loss='sparse_categorical_crossentropy', metrics='accuracy') -# return(model) -# } -# -# MyTuner = PyClass( -# 'Tuner', -# inherit = Tuner_class(), -# list( -# run_trial = function(self, trial, train_ds){ -# hp = trial$hyperparameters -# train_ds = train_ds$batch(hp$Int('batch_size', 32L, 128L, step=32L, default=64L)) -# model = self$hypermodel$build(trial$hyperparameters) -# lr = hp$Float('learning_rate', 1e-4, 1e-2, sampling='log', default=1e-3) -# optimizer = tf$keras$optimizers$Adam(lr) -# epoch_loss_metric = tf$keras$metrics$Mean() -# -# -# run_train_step = function(data){ -# images = data[[1]] -# labels = data[[2]] -# -# -# with (tf$GradientTape() %as% tape,{ -# logits = model(images) -# loss = tf$keras$losses$sparse_categorical_crossentropy(labels, logits) -# if(length(model$losses) > 0){ -# loss = loss + tf$math$add_n(model$losses) -# } -# gradients = tape$gradient(loss, model$trainable_variables) -# }) -# optimizer$apply_gradients(purrr::transpose(list(gradients, model$trainable_variables))) -# epoch_loss_metric$update_state(loss) -# loss -# } -# -# for (epoch in 1:1) { -# print(paste('Epoch',epoch)) -# self$on_epoch_begin(trial, model, epoch, logs= list()) -# intializer = make_iterator_one_shot(train_ds) -# -# for (batch in 1:length(iterate(train_ds))) { -# -# init_next = iter_next(intializer) -# -# self$on_batch_begin(trial, model, batch, logs=list()) -# batch_loss = as.numeric(run_train_step(init_next)) -# self$on_batch_end(trial, model, batch, logs=list(paste('loss', batch_loss))) -# -# if (batch %% 100L == 0L){ -# loss = epoch_loss_metric$result()$numpy() -# print(paste('Batch',batch, 'Average loss', loss)) -# } -# } -# -# epoch_loss = epoch_loss_metric$result()$numpy() -# self$on_epoch_end(trial, model, epoch, logs=list('loss'= epoch_loss)) -# epoch_loss_metric$reset_states() -# } -# } -# ) -# ) -# -# -# main = function () { -# tuner = MyTuner( -# oracle=BayesianOptimization( -# objective=Objective(name='loss', direction = 'min'), -# max_trials=1), -# hypermodel=conv_build_model, -# directory='results2', -# project_name='mnist_custom_training2') -# -# mnist_data = dataset_fashion_mnist() -# c(mnist_train, mnist_test) %<-% mnist_data -# rm(mnist_data) -# -# mnist_train$x = tf$dtypes$cast(mnist_train$x, 'float32') / 255. 
-# -# mnist_train$x = keras::k_reshape(mnist_train$x,shape = c(6e4,28,28,1)) -# -# mnist_train = tensor_slices_dataset(mnist_train) %>% dataset_shuffle(1e3) -# -# tuner %>% fit_tuner(train_ds = mnist_train) -# -# best_model = tuner %>% get_best_models(1L) -# -# } -# -# main() -# - diff --git a/vignettes/HyperModel_subclass.R b/vignettes/HyperModel_subclass.R deleted file mode 100644 index 9be4e73..0000000 --- a/vignettes/HyperModel_subclass.R +++ /dev/null @@ -1,96 +0,0 @@ -## ----setup, include=FALSE----------------------------------------------------- -knitr::opts_chunk$set(echo = TRUE, eval = F) - -## ----------------------------------------------------------------------------- -# MyHyperModel <- reticulate::PyClass( -# "HyperModel", -# -# inherit = kerastuneR::HyperModel_class(), -# -# list( -# -# `__init__` = function(self, num_classes) { -# -# self$num_classes = num_classes -# NULL -# }, -# -# build = function(self,hp) { -# model = keras_model_sequential() -# model %>% layer_dense(units = hp$Int('units', -# min_value=32L, -# max_value=512L, -# step=32L), -# activation='relu') %>% -# layer_dense(as.integer(self$num_classes), activation='softmax') %>% -# compile( -# optimizer= tf$keras$optimizers$Adam( -# hp$Choice('learning_rate', -# values=c(1e-2, 1e-3, 1e-4))), -# loss='categorical_crossentropy', -# metrics='accuracy') -# } -# ) -# ) - -## ----------------------------------------------------------------------------- -# -# # generate some data -# -# x_data <- matrix(data = runif(500,0,1),nrow = 50,ncol = 5) -# y_data <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix() -# -# x_data2 <- matrix(data = runif(500,0,1),nrow = 50,ncol = 5) -# y_data2 <- ifelse(runif(50,0,1) > 0.6, 1L,0L) %>% as.matrix() -# -# # subclass -# -# MyHyperModel <- reticulate::PyClass( -# "HyperModel", -# -# inherit = kerastuneR::HyperModel_class(), -# -# list( -# -# `__init__` = function(self, num_classes) { -# -# self$num_classes = num_classes -# NULL -# }, -# -# build = function(self,hp) { -# model = keras_model_sequential() -# model %>% layer_dense(units = hp$Int('units', -# min_value=32L, -# max_value=512L, -# step=32L), -# activation='relu') %>% -# layer_dense(as.integer(self$num_classes), activation='softmax') %>% -# compile( -# optimizer= tf$keras$optimizers$Adam( -# hp$Choice('learning_rate', -# values=c(1e-2, 1e-3, 1e-4))), -# loss='categorical_crossentropy', -# metrics='accuracy') -# } -# ) -# ) -# -# # Random Search -# -# hypermodel = MyHyperModel(num_classes = 10) -# -# tuner = RandomSearch( -# hypermodel, -# objective = 'val_accuracy', -# max_trials = 10, -# directory = 'my_dir', -# project_name = 'helloworld') -# -# # Run -# -# tuner %>% fit_tuner(x_data,y_data, -# epochs = 5, -# validation_data = list(x_data2, y_data2)) -# - diff --git a/vignettes/Introduction.R b/vignettes/Introduction.R deleted file mode 100644 index e71d0ea..0000000 --- a/vignettes/Introduction.R +++ /dev/null @@ -1,3 +0,0 @@ -## ----setup, include=FALSE----------------------------------------------------- -knitr::opts_chunk$set(eval = FALSE) - diff --git a/vignettes/MNIST.R b/vignettes/MNIST.R deleted file mode 100644 index 90623d7..0000000 --- a/vignettes/MNIST.R +++ /dev/null @@ -1,83 +0,0 @@ -## ----setup, include=FALSE----------------------------------------------------- -knitr::opts_chunk$set(echo = TRUE,eval = F) - -## ----------------------------------------------------------------------------- -# conv_build_model = function(hp) { -# 'Builds a convolutional model.' 
-# inputs = tf$keras$Input(shape=list(28L, 28L, 1L)) -# -# x = inputs -# -# for (i in 1:hp$Int('conv_layers', 1L, 3L, default=3L)) { -# x = tf$keras$layers$Conv2D(filters = hp$Int(paste('filters_', i, sep = ''), -# 4L, 32L, step=4L, default=8L), -# kernel_size = hp$Int(paste('kernel_size_', i, sep = ''), 3L, 5L), -# activation ='relu', -# padding='same')(x) -# if (hp$Choice(paste('pooling', i, sep = ''), c('max', 'avg')) == 'max') { -# x = tf$keras$layers$MaxPooling2D()(x) -# } else { -# x = tf$keras$layers$AveragePooling2D()(x) -# } -# x = tf$keras$layers$BatchNormalization()(x) -# x = tf$keras$layers$ReLU()(x) -# -# } -# if (hp$Choice('global_pooling', c('max', 'avg')) == 'max') { -# x = tf$keras$layers$GlobalMaxPool2D()(x) -# } else { -# x = tf$keras$layers$GlobalAveragePooling2D()(x) -# } -# -# outputs = tf$keras$layers$Dense(10L, activation='softmax')(x) -# model = tf$keras$Model(inputs, outputs) -# optimizer = hp$Choice('optimizer', c('adam', 'sgd')) -# model %>% compile(optimizer, loss='sparse_categorical_crossentropy', metrics='accuracy') -# return(model) -# } -# -# main = function() { -# tuner = Hyperband( -# hypermodel = conv_build_model, -# objective = 'val_accuracy', -# max_epochs = 8, -# factor = 2, -# hyperband_iterations = 3, -# directory = 'results_dir', -# project_name='mnist') -# -# # call keras library for downloading MNIST dataset -# library(keras) -# -# mnist_data = dataset_fashion_mnist() -# c(mnist_train, mnist_test) %<-% mnist_data -# rm(mnist_data) -# -# # reshape data -# mnist_train$x = keras::k_reshape(mnist_train$x,shape = c(6e4,28,28,1)) -# mnist_test$x = keras::k_reshape(mnist_test$x,shape = c(1e4,28,28,1)) -# -# # call tfdatasets and slice dataset -# # turn data type into float 32 (features, not labels/outputs) -# library(tfdatasets) -# mnist_train = tensor_slices_dataset(list(tf$dtypes$cast( -# mnist_train$x, 'float32') / 255., mnist_train$y)) %>% -# dataset_shuffle(1e3) %>% dataset_batch(1e2) %>% dataset_repeat() -# -# mnist_test = tensor_slices_dataset(list(tf$dtypes$cast( -# mnist_test$x, 'float32') / 255., mnist_test$y)) %>% -# dataset_batch(1e2) -# -# # finally, begin a training with a bunch of parameters -# tuner %>% fit_tuner(x = mnist_train, -# steps_per_epoch=600, -# validation_data=mnist_test, -# validation_steps=100, -# epochs=2, -# callbacks=c(tf$keras$callbacks$EarlyStopping('val_accuracy')) -# ) -# } -# -# main() -# - diff --git a/vignettes/best_practice.R b/vignettes/best_practice.R deleted file mode 100644 index bddb462..0000000 --- a/vignettes/best_practice.R +++ /dev/null @@ -1,102 +0,0 @@ -## ----setup, include=FALSE----------------------------------------------------- -knitr::opts_chunk$set(echo = TRUE,eval = F) - -## ----------------------------------------------------------------------------- -# -# library(keras) -# library(tensorflow) -# library(kerastuneR) -# -# if(tensorflow::tf_gpu_configured()) { -# physical_devices = tf$config$list_physical_devices('GPU') -# tf$config$experimental$set_memory_growth(physical_devices[[1]],TRUE) -# } -# -# -# # The data, shuffled and split between train and test sets -# mnist <- dataset_mnist() -# x_train <- mnist$train$x -# y_train <- mnist$train$y -# x_test <- mnist$test$x -# y_test <- mnist$test$y -# -# augment_images = function(x, hp) { -# use_rotation = hp$Boolean('use_rotation') -# if(use_rotation) { -# x = tf$keras$layers$experimental$preprocessing$RandomRotation( -# hp$Float('rotation_factor', min_value=0.05, max_value=0.2) -# )(x) -# } -# use_zoom = hp$Boolean('use_zoom') -# if(use_zoom) { 
-# x = tf$keras$layers$experimental$preprocessing$RandomZoom( -# hp$Float('use_zoom', min_value=0.05, max_value=0.2) -# )(x) -# } -# x -# } -# -# make_model = function(hp) { -# inputs = layer_input(shape=c(28, 28, 1)) -# x = tf$keras$layers$experimental$preprocessing$Rescaling(1. / 255)(inputs) -# x = tf$keras$layers$experimental$preprocessing$Resizing(64L, 64L)(x) -# x = augment_images(x, hp) -# num_block = hp$Int('num_block', min_value=2, max_value=5, step=1) -# num_filters = hp$Int('num_filters', min_value=32, max_value=128, step=32) -# for (i in 1:length(num_block)) { -# x = x %>% layer_conv_2d( -# num_filters, -# kernel_size=3, -# activation='relu', -# padding='same' -# ) %>% -# layer_conv_2d( -# num_filters, -# kernel_size=3, -# activation='relu', -# padding='same' -# ) %>% layer_max_pooling_2d(2) -# } -# reduction_type = hp$Choice('reduction_type', c('flatten', 'avg')) -# -# if(reduction_type == 'flatten') { -# x = x %>% layer_flatten() -# } else { -# x = x %>% layer_global_average_pooling_2d() -# } -# -# x = x %>% layer_dense( -# units=hp$Int('num_dense_units', min_value=32, max_value=512, step=32), -# activation='relu' -# ) %>% layer_dropout( -# hp$Float('dense_dropout', min_value = 0., max_value = 0.7) -# ) -# -# outputs = x %>% layer_dense(10) -# model = keras_model(inputs, outputs) -# learning_rate = hp$Float('learning_rate', min_value = 3e-4, max_value = 3e-3) -# optimizer = optimizer_adam(lr=1e-3) -# model %>% compile(loss = tf$keras$losses$SparseCategoricalCrossentropy(from_logits = TRUE), -# optimizer = optimizer, -# metrics = tf$keras$metrics$SparseCategoricalAccuracy(name='acc')) -# model %>% summary() -# return(model) -# } -# -# -# tuner = RandomSearch( -# make_model, -# objective='val_acc', -# max_trials=2, -# overwrite=TRUE) -# -# -# callbacks=callback_early_stopping(monitor = 'val_acc', mode = 'max', -# patience = 3, baseline = 0.9) -# tuner %>% fit_tuner(x_train, y_train, validation_split = 0.2, -# callbacks = list(callbacks), verbose=1, epochs=2) - -## ----------------------------------------------------------------------------- -# best_hp = tuner %>% get_best_models(1) -# history = model %>% fit(x_train, y_train, validation_split = 0.2, epochs = 2) - From ebe55029da5b93631cfa07ab599b70eb834b3fe7 Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 17:08:48 +0400 Subject: [PATCH 11/13] typo --- vignettes/MNIST.Rmd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vignettes/MNIST.Rmd b/vignettes/MNIST.Rmd index 845e1e1..c16d1b0 100644 --- a/vignettes/MNIST.Rmd +++ b/vignettes/MNIST.Rmd @@ -9,7 +9,7 @@ vignette: > --- ```{r setup, include=FALSE} -knitr::opts_chunk$set(echo = TRUE,eval = F) +knitr::opts_chunk$set(echo = TRUE,eval = FALSE) ``` This tutorial shows the hyperparameter tuning for [MNIST dataset](http://yann.lecun.com/exdb/mnist/). From 9742f9e19fce12e29644a8e6a3f352d3e9b01aff Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 17:10:27 +0400 Subject: [PATCH 12/13] typo --- vignettes/MNIST.Rmd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vignettes/MNIST.Rmd b/vignettes/MNIST.Rmd index c16d1b0..2d80972 100644 --- a/vignettes/MNIST.Rmd +++ b/vignettes/MNIST.Rmd @@ -59,7 +59,7 @@ As the ```build(hp)``` function is ready, we should build a Hyperband object, an Below is the full version of a tuning process for MNIST dataset. -```{r} +``` conv_build_model = function(hp) { 'Builds a convolutional model.' 
inputs = tf$keras$Input(shape=list(28L, 28L, 1L)) From 0365243be2a4b6399d55b065e953c02455961a70 Mon Sep 17 00:00:00 2001 From: Turgut Date: Sat, 13 Apr 2024 17:12:28 +0400 Subject: [PATCH 13/13] no r code evaluation --- vignettes/BayesianOptimisation.Rmd | 4 ++-- vignettes/HyperModel_subclass.Rmd | 4 ++-- vignettes/best_practice.Rmd | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vignettes/BayesianOptimisation.Rmd b/vignettes/BayesianOptimisation.Rmd index 6ba8ec1..ebcf624 100644 --- a/vignettes/BayesianOptimisation.Rmd +++ b/vignettes/BayesianOptimisation.Rmd @@ -25,9 +25,9 @@ The Tuner class at ```Tuner_class()``` can be subclassed to support advanced use Thanks to [Daniel Falbel from RStudio](https://github.com/dfalbel), the ```Bayesian Optimization``` example was successfully adapted. -```{r } +``` -library(keras) +library(keras3) library(tensorflow) library(dplyr) library(tfdatasets) diff --git a/vignettes/HyperModel_subclass.Rmd b/vignettes/HyperModel_subclass.Rmd index d4f740e..bbec566 100644 --- a/vignettes/HyperModel_subclass.Rmd +++ b/vignettes/HyperModel_subclass.Rmd @@ -18,7 +18,7 @@ We could use __a HyperModel subclass__ instead of a model-building function. So, A HyperModel subclass only needs to implement a ```build(self, hp)``` method. And, again one should return a compiled model inside a ```build``` function. -```{r } +``` MyHyperModel <- reticulate::PyClass( "HyperModel", @@ -53,7 +53,7 @@ MyHyperModel <- reticulate::PyClass( ## Full code -```{r } +``` # generate some data diff --git a/vignettes/best_practice.Rmd b/vignettes/best_practice.Rmd index c5c1074..79a180b 100644 --- a/vignettes/best_practice.Rmd +++ b/vignettes/best_practice.Rmd @@ -14,7 +14,7 @@ knitr::opts_chunk$set(echo = TRUE,eval = F) This tutorial shows the hyperparameter tuning for [MNIST dataset](http://yann.lecun.com/exdb/mnist/). -```{r} +``` library(keras) library(tensorflow) @@ -112,7 +112,7 @@ tuner %>% fit_tuner(x_train, y_train, validation_split = 0.2, Extract model and retrain: -```{r} +``` best_hp = tuner %>% get_best_models(1) history = model %>% fit(x_train, y_train, validation_split = 0.2, epochs = 2) ```
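The recurring fix across patches 04 and 07 is type coercion at the R/Python boundary: bare R numerals are doubles and cross into Python as floats, while keras-tuner arguments and TensorFlow shape vectors expect integers — hence the `6e4L`-style literals and the `as.integer()` guard added to `random_search.R`. A minimal sketch of that pattern is below, assuming only that the `tensorflow` R package is installed; `ensure_int()` is a hypothetical illustrative helper, not part of kerastuneR.

```
library(tensorflow)

# Hypothetical helper: coerce an R numeric to integer before it reaches
# Python, where 2 would otherwise arrive as float 2.0 rather than int 2.
ensure_int <- function(x) if (is.null(x)) x else as.integer(x)

args <- list(max_trials = 10, executions_per_trial = 2)             # doubles as typed
args$executions_per_trial <- ensure_int(args$executions_per_trial)  # now 2L

# Integer shape vectors, as in the patched tests; tf$reshape() stands in
# for the keras::k_reshape() backend helper that keras3 does not carry over.
x <- tf$zeros(shape = c(4L, 28L, 28L))
y <- tf$reshape(x, shape = c(4L, 28L, 28L, 1L))
```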