Advanced Deep Learning with Keras
Chapter 6
# during training
noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
# random class labels and codes
noise_label = np.eye(num_labels)[np.arange(0, 16) % num_labels]
noise_code1 = np.random.normal(scale=0.5, size=[16, 1])
noise_code2 = np.random.normal(scale=0.5, size=[16, 1])
# number of elements in train dataset
train_size = x_train.shape[0]
print(model_name,
      "Labels for generated images: ",
      np.argmax(noise_label, axis=1))
for i in range(train_steps):
    # train the discriminator for 1 batch
    # 1 batch of real (label=1.0) and fake images (label=0.0)
    # randomly pick real images and corresponding labels from dataset
    rand_indexes = np.random.randint(0, train_size, size=batch_size)
    real_images = x_train[rand_indexes]
    real_labels = y_train[rand_indexes]
    # random codes for real images
    real_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
    real_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])
    # generate fake images, labels and codes
    noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
    fake_labels = np.eye(num_labels)[np.random.choice(num_labels, batch_size)]
    fake_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
    fake_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])
    inputs = [noise, fake_labels, fake_code1, fake_code2]
    fake_images = generator.predict(inputs)
    # real + fake images = 1 batch of train data
    x = np.concatenate((real_images, fake_images))
    labels = np.concatenate((real_labels, fake_labels))
    codes1 = np.concatenate((real_code1, fake_code1))
    codes2 = np.concatenate((real_code2, fake_code2))
    # label real and fake images
    # real images label is 1.0
    y = np.ones([2 * batch_size, 1])
    # fake images label is 0.0
[ 175 ]
Disentangled Representation GANs
    y[batch_size:, :] = 0
    # train discriminator network, log the loss and label accuracy
    outputs = [y, labels, codes1, codes2]
    # metrics = ['loss', 'activation_1_loss', 'label_loss',
    # 'code1_loss', 'code2_loss', 'activation_1_acc',
    # 'label_acc', 'code1_acc', 'code2_acc']
    # from discriminator.metrics_names
    metrics = discriminator.train_on_batch(x, outputs)
    fmt = "%d: [discriminator loss: %f, label_acc: %f]"
    log = fmt % (i, metrics[0], metrics[6])
    # train the adversarial network for 1 batch
    # 1 batch of fake images with label=1.0 and
    # corresponding one-hot label or class + random codes
    # since the discriminator weights are frozen in
    # adversarial network only the generator is trained
    # generate fake images, labels and codes
    noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
    fake_labels = np.eye(num_labels)[np.random.choice(num_labels, batch_size)]
    fake_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
    fake_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])
    # label fake images as real
    y = np.ones([batch_size, 1])
    # note that unlike in discriminator training,
    # we do not save the fake images in a variable
    # the fake images go to the discriminator input of the
    # adversarial for classification
    # log the loss and label accuracy
    inputs = [noise, fake_labels, fake_code1, fake_code2]
    outputs = [y, fake_labels, fake_code1, fake_code2]
    metrics = adversarial.train_on_batch(inputs, outputs)
    fmt = "%s [adversarial loss: %f, label_acc: %f]"
    log = fmt % (log, metrics[0], metrics[6])
    print(log)
    if (i + 1) % save_interval == 0:
        if (i + 1) == train_steps:
            show = True
        else:
            show = False
[ 176 ]
- Page 141 and 142: Generative Adversarial Networks (GA
- Page 143 and 144: Improved GANsIn summary, the goal o
- Page 145 and 146: Improved GANsThe intuition behind E
- Page 147 and 148: Improved GANsThis makes sense since
- Page 149 and 150: Improved GANsIn the context of GANs
- Page 151 and 152: Improved GANsFigure 5.1.3: Top: Tra
- Page 153 and 154: Improved GANsThe functions include:
- Page 155 and 156: Improved GANsmodels = (generator, d
- Page 157 and 158: Improved GANsfor layer in discrimin
- Page 159 and 160: Improved GANsFollowing figure shows
- Page 161 and 162: Improved GANsThe preceding table sh
- Page 163 and 164: Improved GANsFollowing figure shows
- Page 165 and 166: Improved GANsEssentially, in CGAN w
- Page 167 and 168: Improved GANslayer = Dense(layer_fi
- Page 169 and 170: Improved GANsx = BatchNormalization
- Page 171 and 172: Improved GANsdiscriminator.compile(
- Page 173 and 174: Improved GANssize=batch_size)real_i
- Page 175 and 176: Improved GANsUnlike CGAN, the sampl
- Page 177 and 178: Improved GANsConclusionIn this chap
- Page 179 and 180: Disentangled Representation GANsIn
- Page 181 and 182: Disentangled Representation GANsInf
- Page 183 and 184: Disentangled Representation GANsFol
- Page 185 and 186: Disentangled Representation GANs# A
- Page 187 and 188: Disentangled Representation GANsif
- Page 189 and 190: Disentangled Representation GANsLis
- Page 191: Disentangled Representation GANsdat
- Page 195 and 196: Disentangled Representation GANspyt
- Page 197 and 198: Disentangled Representation GANsThe
- Page 199 and 200: Disentangled Representation GANsSta
- Page 201 and 202: Disentangled Representation GANs( )
- Page 203 and 204: Disentangled Representation GANsThe
- Page 205 and 206: Disentangled Representation GANsfea
- Page 207 and 208: Disentangled Representation GANs# f
- Page 209 and 210: Disentangled Representation GANslat
- Page 211 and 212: Disentangled Representation GANsDis
- Page 213 and 214: Disentangled Representation GANsz_d
- Page 215 and 216: Disentangled Representation GANs2.
- Page 217 and 218: Disentangled Representation GANsFig
- Page 220 and 221: Cross-Domain GANsIn computer vision
- Page 222 and 223: Chapter 7There are many more exampl
- Page 224 and 225: The CycleGAN ModelFigure 7.1.3 show
- Page 226 and 227: Chapter 7Repeat for n training step
- Page 228 and 229: Chapter 7Implementing CycleGAN usin
- Page 230 and 231: filters=16,kernel_size=3,strides=2,
- Page 232 and 233: Chapter 7kernel_size=kernel_size)e3
- Page 234 and 235: Listing 7.1.3, cyclegan-7.1.1.py sh
- Page 236 and 237: Chapter 71) Build target and source
- Page 238 and 239: Chapter 7preal_target,reco_source,r
- Page 240 and 241: size=batch_size)real_source = sourc
Chapter 6
# during training
noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
# random class labels and codes
noise_label = np.eye(num_labels)[np.arange(0, 16) % num_labels]
noise_code1 = np.random.normal(scale=0.5, size=[16, 1])
noise_code2 = np.random.normal(scale=0.5, size=[16, 1])
# number of elements in train dataset
train_size = x_train.shape[0]
print(model_name,
"Labels for generated images: ",
np.argmax(noise_label, axis=1))
for i in range(train_steps):
# train the discriminator for 1 batch
# 1 batch of real (label=1.0) and fake images (label=0.0)
# randomly pick real images and corresponding labels from dataset
rand_indexes = np.random.randint(0, train_size, size=batch_size)
real_images = x_train[rand_indexes]
real_labels = y_train[rand_indexes]
# random codes for real images
real_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
real_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])
# generate fake images, labels and codes
noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
fake_labels = np.eye(num_labels)[np.random.choice(num_labels,
batch_size)]
fake_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
fake_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])
inputs = [noise, fake_labels, fake_code1, fake_code2]
fake_images = generator.predict(inputs)
# real + fake images = 1 batch of train data
x = np.concatenate((real_images, fake_images))
labels = np.concatenate((real_labels, fake_labels))
codes1 = np.concatenate((real_code1, fake_code1))
codes2 = np.concatenate((real_code2, fake_code2))
# label real and fake images
# real images label is 1.0
y = np.ones([2 * batch_size, 1])
# fake images label is 0.0
[ 175 ]