run_jupyter_job: comparison of test-data/tf-script.py @ 0:f4619200cb0a (draft)
"planemo upload for repository https://github.com/bgruening/galaxytools/tree/master/tools/jupyter_job commit f945b1bff5008ba01da31c7de64e5326579394d6"
author: bgruening
date: Sat, 11 Dec 2021 17:56:38 +0000
parents: (none)
children: (none)
comparing -1:000000000000 (null revision) with 0:f4619200cb0a
import numpy as np
import tensorflow as tf

# Load MNIST and keep a small 128-sample subset for a quick training run.
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
mnist_images, mnist_labels = mnist_images[:128], mnist_labels[:128]
dataset = tf.data.Dataset.from_tensor_slices(
    (tf.cast(mnist_images[..., tf.newaxis] / 255, tf.float32),
     tf.cast(mnist_labels, tf.int64)))
dataset = dataset.shuffle(1000).batch(32)

tot_loss = []
epochs = 1

# Small convolutional classifier producing 10 logits, one per digit class.
mnist_model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10)
])

optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Custom training loop: record the mean loss per batch, then append the
# mean loss of each epoch to tot_loss.
for epoch in range(epochs):
    loss_history = []
    for (batch, (images, labels)) in enumerate(dataset):
        with tf.GradientTape() as tape:
            logits = mnist_model(images, training=True)
            loss_value = loss_object(labels, logits)
        loss_history.append(loss_value.numpy().mean())
        grads = tape.gradient(loss_value, mnist_model.trainable_variables)
        optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
    tot_loss.append(np.mean(loss_history))
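
Not part of the repository file above, just a minimal follow-up sketch: once the script has run, the per-epoch mean losses collected in tot_loss could be reported or written to a file for later inspection. The output filename tot_loss.txt is an arbitrary choice for illustration.

# Hypothetical follow-up, not in tf-script.py: report and save the recorded
# per-epoch mean loss values (assumes the script above ran in this session).
for epoch_idx, epoch_loss in enumerate(tot_loss):
    print("epoch %d: mean loss %.4f" % (epoch_idx, epoch_loss))
np.savetxt("tot_loss.txt", np.array(tot_loss))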