Advanced Deep Learning with Keras
# Chapter 3: Autoencoders — colorization example, setup section.
# NOTE(review): reconstructed from a line-collapsed PDF extraction;
# page-header artifacts ("Chapter 3", "[ 91 ]") removed.
from keras.layers import Reshape, Conv2DTranspose
from keras.models import Model
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint
from keras.datasets import cifar10
from keras.utils import plot_model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import os


# convert from color image (RGB) to grayscale
# source: opencv.org
# grayscale = 0.299*red + 0.587*green + 0.114*blue
def rgb2gray(rgb):
    """Return the luma-weighted grayscale of an RGB image array.

    `rgb` is any array whose last axis holds at least the R, G, B
    channels; the dot product contracts that axis away, so the result
    has one fewer dimension (e.g. HxWx3 -> HxW).
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])


# load the CIFAR10 data (class labels are unused by an autoencoder)
(x_train, _), (x_test, _) = cifar10.load_data()

# input image dimensions
# we assume data format "channels_last"
img_rows = x_train.shape[1]
img_cols = x_train.shape[2]
channels = x_train.shape[3]

# create saved_images folder for the preview figures
imgs_dir = 'saved_images'
save_dir = os.path.join(os.getcwd(), imgs_dir)
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)

# display the 1st 100 input images (color and gray)
# tile 100 images into a 10x10 mosaic for a single figure
imgs = x_test[:100]
imgs = imgs.reshape((10, 10, img_rows, img_cols, channels))
imgs = np.vstack([np.hstack(i) for i in imgs])
plt.figure()
plt.axis('off')
plt.title('Test color images (Ground Truth)')
plt.imshow(imgs, interpolation='none')
plt.savefig('%s/test_color.png' % imgs_dir)
plt.show()
# Chapter 3: Autoencoders — preprocessing and network parameters.
# NOTE(review): reconstructed from a line-collapsed PDF extraction;
# page-header artifacts ("Autoencoders", "[ 92 ]") removed.
# convert color train and test images to gray
x_train_gray = rgb2gray(x_train)
x_test_gray = rgb2gray(x_test)

# display grayscale version of test images as a 10x10 mosaic
imgs = x_test_gray[:100]
imgs = imgs.reshape((10, 10, img_rows, img_cols))
imgs = np.vstack([np.hstack(i) for i in imgs])
plt.figure()
plt.axis('off')
plt.title('Test gray images (Input)')
plt.imshow(imgs, interpolation='none', cmap='gray')
plt.savefig('%s/test_gray.png' % imgs_dir)
plt.show()

# normalize output train and test color images to [0, 1]
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# normalize input train and test grayscale images to [0, 1]
x_train_gray = x_train_gray.astype('float32') / 255
x_test_gray = x_test_gray.astype('float32') / 255

# reshape images to row x col x channel for CNN output/validation
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)

# reshape images to row x col x channel for CNN input
# (rgb2gray dropped the channel axis, so restore it as a single channel)
x_train_gray = x_train_gray.reshape(x_train_gray.shape[0], img_rows, img_cols, 1)
x_test_gray = x_test_gray.reshape(x_test_gray.shape[0], img_rows, img_cols, 1)

# network parameters
input_shape = (img_rows, img_cols, 1)
batch_size = 32
kernel_size = 3
latent_dim = 256
# encoder/decoder number of CNN layers and filters per layer
layer_filters = [64, 128, 256]
- Page 57 and 58: Deep Neural NetworksWhile this chap
- Page 59 and 60: Deep Neural Networks# reshape and n
- Page 61 and 62: Deep Neural NetworksEverything else
- Page 63 and 64: Deep Neural Networksfrom keras.util
- Page 65 and 66: Deep Neural NetworksFigure 2.1.3: T
- Page 67 and 68: Deep Neural NetworksHence, the netw
- Page 69 and 70: Deep Neural NetworksGenerally speak
- Page 71 and 72: Deep Neural NetworksIn the dataset,
- Page 73 and 74: Deep Neural NetworksTransition Laye
- Page 75 and 76: Deep Neural NetworksThere are some
- Page 77 and 78: Deep Neural NetworksResNet v2 is al
- Page 79 and 80: Deep Neural Networks…if version =
- Page 81 and 82: Deep Neural NetworksTo prevent the
- Page 83 and 84: Deep Neural NetworksAverage Pooling
- Page 85 and 86: Deep Neural Networks# orig paper us
- Page 88 and 89: AutoencodersIn the previous chapter
- Page 90 and 91: Chapter 3The autoencoder has the te
- Page 92 and 93: Chapter 3Firstly, we're going to im
- Page 94 and 95: Chapter 3# reconstruct the inputout
- Page 96 and 97: Chapter 3Figure 3.2.2: The decoder
- Page 98 and 99: batch_size=32,model_name="autoencod
- Page 100 and 101: Chapter 3Figure 3.2.6: Digits gener
- Page 102 and 103: Chapter 3As shown in Figure 3.3.2,
- Page 104 and 105: Chapter 3image_size = x_train.shape
- Page 106 and 107: Chapter 3# Mean Square Error (MSE)
- Page 110 and 111: Chapter 3# build the autoencoder mo
- Page 112 and 113: Chapter 3x_train,validation_data=(x
- Page 114: Chapter 3ConclusionIn this chapter,
- Page 117 and 118: Generative Adversarial Networks (GA
- Page 119 and 120: Generative Adversarial Networks (GA
- Page 121 and 122: Generative Adversarial Networks (GA
- Page 123 and 124: Generative Adversarial Networks (GA
- Page 125 and 126: Generative Adversarial Networks (GA
- Page 127 and 128: Generative Adversarial Networks (GA
- Page 129 and 130: Generative Adversarial Networks (GA
- Page 131 and 132: Generative Adversarial Networks (GA
- Page 133 and 134: Generative Adversarial Networks (GA
- Page 135 and 136: Generative Adversarial Networks (GA
- Page 137 and 138: Generative Adversarial Networks (GA
- Page 139 and 140: Generative Adversarial Networks (GA
- Page 141 and 142: Generative Adversarial Networks (GA
- Page 143 and 144: Improved GANsIn summary, the goal o
- Page 145 and 146: Improved GANsThe intuition behind E
- Page 147 and 148: Improved GANsThis makes sense since
- Page 149 and 150: Improved GANsIn the context of GANs
- Page 151 and 152: Improved GANsFigure 5.1.3: Top: Tra
- Page 153 and 154: Improved GANsThe functions include:
- Page 155 and 156: Improved GANsmodels = (generator, d
- Page 157 and 158: Improved GANsfor layer in discrimin
Autoencoders
# Derive the grayscale datasets that serve as the 1-channel inputs
# to the colorization network.
x_train_gray = rgb2gray(x_train)
x_test_gray = rgb2gray(x_test)
# Tile the first 100 grayscale test images into a 10x10 mosaic,
# show it, and save the figure to the images folder.
mosaic = x_test_gray[:100].reshape((10, 10, img_rows, img_cols))
mosaic = np.vstack([np.hstack(row) for row in mosaic])
plt.figure()
plt.title('Test gray images (Input)')
plt.axis('off')
plt.imshow(mosaic, interpolation='none', cmap='gray')
plt.savefig('%s/test_gray.png' % imgs_dir)
plt.show()
# Scale all pixel values from [0, 255] down to [0, 1].
# Color images are the network targets; grayscale images the inputs.
x_train, x_test = (x.astype('float32') / 255 for x in (x_train, x_test))
x_train_gray, x_test_gray = (
    x.astype('float32') / 255 for x in (x_train_gray, x_test_gray)
)
# Reshape to (N, rows, cols, channels) as the CNN expects.
# Color images keep their 3 channels (autoencoder output/validation);
# grayscale images get an explicit single channel restored, since
# rgb2gray contracted the channel axis away (autoencoder input).
# Fix: the extracted text had the identifier `img_cols` broken across
# a line wrap (`img_` / `cols`), which is a SyntaxError — rejoined here.
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, channels)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, channels)
x_train_gray = x_train_gray.reshape(x_train_gray.shape[0], img_rows, img_cols, 1)
x_test_gray = x_test_gray.reshape(x_test_gray.shape[0], img_rows, img_cols, 1)
# Hyperparameters for the colorization autoencoder.
input_shape = (img_rows, img_cols, 1)  # single-channel grayscale input
batch_size, kernel_size, latent_dim = 32, 3, 256
# One encoder/decoder Conv layer per entry; value = filters at that depth.
layer_filters = [64, 128, 256]
[ 92 ]