forked from despoisj/LatentSpaceVisualization
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmodel.py
86 lines (68 loc) · 2.72 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
import os
import sys
import h5py
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from scipy.stats import norm
from sklearn import manifold
from keras.layers import Input, Dense, Lambda, Flatten, Reshape
from keras.layers import Convolution2D, UpSampling2D, MaxPooling2D
from keras.models import Model
from keras.layers.advanced_activations import ELU
from keras import backend as K
from keras import objectives
from config import latent_dim, imageSize
# Convolutional models
# x is the input image tensor, z is the latent-space representation
def getModels():
    """Build and return the three Keras models of the autoencoder.

    The encoder compresses a (imageSize, imageSize, 1) grayscale image
    into a ``latent_dim`` vector; the decoder maps a latent vector back
    to an image. All decoder layers are instantiated exactly once and
    shared between the full autoencoder and the standalone decoder, so
    training the autoencoder also trains the standalone decoder.

    Returns:
        (autoencoder, encoder, decoder): three Keras ``Model`` instances
        sharing weights as described above.
    """
    input_img = Input(shape=(imageSize, imageSize, 1))

    # --- Encoder: two conv + 2x2 max-pool stages ---
    x = Convolution2D(32, 3, 3, border_mode='same')(input_img)
    x = ELU()(x)
    x = MaxPooling2D((2, 2), border_mode='same')(x)
    x = Convolution2D(64, 3, 3, border_mode='same')(x)
    x = ELU()(x)
    x = MaxPooling2D((2, 2), border_mode='same')(x)

    # Latent space // bottleneck layer
    x = Flatten()(x)
    x = Dense(latent_dim)(x)
    z = ELU()(x)

    ##### MODEL 1: ENCODER #####
    encoder = Model(input_img, z)

    # Decoder layers are instantiated once (not per use) so that the
    # autoencoder and the standalone decoder share the same weights.
    # NOTE(review): 7*7*64 assumes imageSize == 28 (28 / 2 / 2 == 7
    # after the two pooling stages) — confirm against config.
    x_decoded_dense1 = Dense(7 * 7 * 64)
    # Reshape the dense output back into a feature-map stack
    x_decoded_reshape0 = Reshape((7, 7, 64))
    x_decoded_upsample0 = UpSampling2D((2, 2))
    x_decoded_conv0 = Convolution2D(32, 3, 3, border_mode='same')
    x_decoded_upsample3 = UpSampling2D((2, 2))
    # Final conv maps back to 1 channel; sigmoid keeps pixels in [0, 1]
    # (the original comment said "Tanh layer", which was incorrect).
    x_decoded_conv3 = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')

    def _decode(latent):
        # Apply the shared decoding stack to a latent tensor. ELU layers
        # carry no weights, so fresh instances per call are equivalent.
        h = x_decoded_dense1(latent)
        h = ELU()(h)
        h = x_decoded_reshape0(h)
        h = x_decoded_upsample0(h)
        h = x_decoded_conv0(h)
        h = ELU()(h)
        h = x_decoded_upsample3(h)
        return x_decoded_conv3(h)

    ##### MODEL 2: AUTO-ENCODER #####
    autoencoder = Model(input_img, _decode(z))

    ##### MODEL 3: DECODER #####
    input_z = Input(shape=(latent_dim,))
    decoder = Model(input_z, _decode(input_z))

    return autoencoder, encoder, decoder