Example: multi-output autoencoder
A single Keras model whose shared convolutional encoder feeds two heads, a softmax digit classifier and a convolutional decoder that reconstructs the input MNIST image; the two losses are weighted and optimized jointly.
import keras
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, UpSampling2D
batch_size = 100
num_classes = 10
epochs = 10
# input image dimensions
img_rows, img_cols = 28, 28
# Data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Convolutional Encoder
input_img = Input(shape=(img_rows, img_cols, 1))
conv_1 = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
pool_1 = MaxPooling2D((2, 2), padding='same')(conv_1)
conv_2 = Conv2D(8, (3, 3), activation='relu', padding='same')(pool_1)
pool_2 = MaxPooling2D((2, 2), padding='same')(conv_2)
conv_3 = Conv2D(8, (3, 3), activation='relu', padding='same')(pool_2)
encoded = MaxPooling2D((2, 2), padding='same')(conv_3)  # shared bottleneck: (4, 4, 8)
# Classification head (reads the shared bottleneck)
flatten = Flatten()(encoded)
fc = Dense(128, activation='relu')(flatten)
softmax = Dense(num_classes, activation='softmax', name='classification')(fc)
# Decoder head (also branches from the shared bottleneck)
conv_4 = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
up_1 = UpSampling2D((2, 2))(conv_4)
conv_5 = Conv2D(8, (3, 3), activation='relu', padding='same')(up_1)
up_2 = UpSampling2D((2, 2))(conv_5)
conv_6 = Conv2D(16, (3, 3), activation='relu')(up_2)  # valid padding: 16x16 -> 14x14, so the final upsampling restores 28x28
up_3 = UpSampling2D((2, 2))(conv_6)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='autoencoder')(up_3)
# One input, two named outputs: class probabilities and the reconstructed image
model = Model(inputs=input_img, outputs=[softmax, decoded])
model.compile(loss={'classification': 'categorical_crossentropy',
                    'autoencoder': 'binary_crossentropy'},
              loss_weights={'classification': 1.0,
                            'autoencoder': 0.5},
              optimizer='adam',
              metrics={'classification': 'accuracy',
                       'autoencoder': ['binary_crossentropy', 'mse']})
model.fit(x_train,
          {'classification': y_train, 'autoencoder': x_train},
          batch_size=batch_size,
          epochs=epochs,
          validation_data=(x_test, {'classification': y_test, 'autoencoder': x_test}),
          verbose=1)
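Once trained, predict() returns one array per output, in the order the outputs were passed to Model(...). A minimal sketch of inference and evaluation, reusing the model, x_test, y_test, and batch_size defined above:
# predict() yields [class probabilities, reconstructions] for this two-output model
class_probs, reconstructions = model.predict(x_test, batch_size=batch_size)
print(class_probs.shape)      # (10000, 10): softmax over the digit classes
print(reconstructions.shape)  # (10000, 28, 28, 1): pixel values in [0, 1]

# evaluate() reports the weighted total loss plus per-output losses and metrics
results = model.evaluate(x_test,
                         {'classification': y_test, 'autoencoder': x_test},
                         batch_size=batch_size,
                         verbose=0)
print(dict(zip(model.metrics_names, results)))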