Update C1_W2_Assignment.js #54

Open · wants to merge 1 commit into base: main
40 changes: 29 additions & 11 deletions C1_Browser-based-TF-JS/W2/assignment/C1_W2_Assignment.js
@@ -14,31 +14,39 @@ function getModel() {
// many layers, filters, and neurons as you like.
// HINT: Take a look at the MNIST example.
model = tf.sequential();

// YOUR CODE HERE

model.add(tf.layers.conv2d({inputShape: [28, 28, 1], filters: 32, kernelSize: 3, activation: 'relu'}));
model.add(tf.layers.maxPooling2d({poolSize: [2, 2]}));
model.add(tf.layers.conv2d({filters: 64, kernelSize: 3, activation: 'relu'}));
model.add(tf.layers.maxPooling2d({poolSize: [2, 2]}));
model.add(tf.layers.flatten());
model.add(tf.layers.dense({units: 128, activation: 'relu'}));
model.add(tf.layers.dense({units: 10, activation: 'softmax'}));
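// Rough shape walk-through for the stack above, assuming 28x28x1 MNIST inputs
// and the default 'valid' padding: conv2d(3x3, 32) -> 26x26x32, maxPool(2x2) -> 13x13x32,
// conv2d(3x3, 64) -> 11x11x64, maxPool(2x2) -> 5x5x64, flatten -> 1600,
// dense(128) -> 128, dense(10, softmax) -> 10 class probabilities.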

// Compile the model using the categoricalCrossentropy loss,
// the tf.train.adam() optimizer, and `acc` for your metrics.
model.compile(// YOUR CODE HERE);
model.compile({
  optimizer: tf.train.adam(),
  loss: 'categoricalCrossentropy',
  metrics: ['accuracy']
});
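// Note: categoricalCrossentropy expects one-hot labels, which is what the MNIST
// data helper used in train() below appears to provide (d.labels); 'accuracy'
// and the 'acc' shorthand mentioned in the comment above map to the same metric
// in TensorFlow.js.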

return model;
}

async function train(model, data) {

// Set the following metrics for the callback: 'loss', 'val_loss', 'acc', 'val_acc'.
const metrics = // YOUR CODE HERE
const metrics = ['loss', 'val_loss', 'acc', 'val_acc'];


// Create the container for the callback. Set the name to 'Model Training' and
// use a height of 1000px for the styles.
const container = // YOUR CODE HERE
const container = { name: 'Model Training', styles: { height: '1000px' } };


// Use tfvis.show.fitCallbacks() to setup the callbacks.
// Use the container and metrics defined above as the parameters.
const fitCallbacks = // YOUR CODE HERE
const fitCallbacks = tfvis.show.fitCallbacks(container, metrics);
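// fitCallbacks() returns an object of model.fit() callback handlers (onBatchEnd,
// onEpochEnd) that plot the metrics listed above into the tfjs-vis surface named
// by `container`; presumably it is passed to model.fit() via the `callbacks`
// option in the (collapsed) options object further down.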

const BATCH_SIZE = 512;
const TRAIN_DATA_SIZE = 6000;
@@ -47,15 +55,25 @@ async function train(model, data) {
// Get the training batches and resize them. Remember to put your code
// inside a tf.tidy() clause to clean up all the intermediate tensors.
// HINT: Take a look at the MNIST example.
const [trainXs, trainYs] = // YOUR CODE HERE
const [trainXs, trainYs] = tf.tidy(() => {
  const d = data.nextTrainBatch(TRAIN_DATA_SIZE);
  return [
    d.xs.reshape([TRAIN_DATA_SIZE, 28, 28, 1]),
    d.labels
  ];
});
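// Per the MNIST example this assignment follows, the data helper returns xs as
// flattened [TRAIN_DATA_SIZE, 784] pixel tensors, so they are reshaped to
// [TRAIN_DATA_SIZE, 28, 28, 1] images here; labels stay one-hot with shape
// [TRAIN_DATA_SIZE, 10].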


// Get the testing batches and resize them. Remember to put your code
// inside a tf.tidy() clause to clean up all the intermediate tensors.
// HINT: Take a look at the MNIST example.
const [testXs, testYs] = // YOUR CODE HERE
const [testXs, testYs] = tf.tidy(() => {
  const d = data.nextTestBatch(TEST_DATA_SIZE);
  return [
    d.xs.reshape([TEST_DATA_SIZE, 28, 28, 1]),
    d.labels
  ];
});
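// Same reshape for the held-out batch, which is used as validation data below.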


return model.fit(trainXs, trainYs, {
batchSize: BATCH_SIZE,
validationData: [testXs, testYs],
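For context, a minimal sketch of how these two functions are typically wired together, modeled on the TF.js MNIST example the assignment comments point to. MnistData, its load() method, and the DOMContentLoaded hook are assumptions here, not part of this diff:

async function run() {
  // Load the MNIST data (assumed helper class from the MNIST example's data.js).
  const data = new MnistData();
  await data.load();

  // Build the model defined above, show its architecture in the visor, and train it.
  const model = getModel();
  tfvis.show.modelSummary({ name: 'Model Architecture' }, model);
  await train(model, data);
}

document.addEventListener('DOMContentLoaded', run);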