# (removed: web-scrape pagination artifact — "Newer"/"Older" links and a 1-88 line-number gutter
#  that are not part of the program and would raise NameError if executed)
#!/usr/bin/env python3
# -*- coding: utf8 -*-
# initializing
import ctypes
import os
# TensorFlow behaviour flags must be set in the environment BEFORE `import
# tensorflow`, which is why these assignments sit between the import lines.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # '1' suppresses INFO logs, keeps WARNING/ERROR
os.environ['TF_ENABLE_XLA'] = '1'  # enable XLA JIT compilation
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'  # auto FP16/FP32 mixed precision
os.environ['TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32'] = '1'  # tensor cores for cuDNN RNN FP32
# NOTE(review): this DISABLE flag appears to contradict the tensor-op enables
# above and below — confirm whether disabling cuDNN tensor-op math is intended.
os.environ['TF_DISABLE_CUDNN_TENSOR_OP_MATH'] = '1'
os.environ['TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32'] = '1'  # tensor cores for cuBLAS FP32
import numpy as np
import tensorflow as tf
# Cap GPU memory usage: expose the first physical GPU as a single 4 GiB logical
# device so this process does not grab the whole card.
# FIX: the try/except body had lost its indentation (SyntaxError as pasted);
# structure restored from the obvious intent of the statements.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # NOTE(review): set_virtual_device_configuration is the older
        # experimental API; newer TF spells this
        # tf.config.set_logical_device_configuration — kept as-is for
        # compatibility with the TF version this file targets.
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)],
        )
    except RuntimeError as e:
        # Virtual devices must be configured before the GPU is initialized;
        # if TF is already initialized this raises, and we only report it.
        print(e)
from keras.models import Sequential
from keras.layers import InputLayer, Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Conv2DTranspose, Cropping2D, LeakyReLU, Reshape, Activation, BatchNormalization, UpSampling2D
from keras.activations import selu, linear
from keras.constraints import max_norm
from keras.optimizers import Adam, SGD
from keras.utils import to_categorical, normalize
from keras.callbacks import ModelCheckpoint
from keras.datasets import mnist
from keras import losses
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
import random
#custom generator import
# from Generators.DataGenerator_whole import DataGenerator
#
# train = os.listdir(path_train)
# train = sorted(train)
# valid = os.listdir(path_valid)
# valid = sorted(valid)
#
# train_generator = DataGenerator(path_train, option, batch_size, True)
# valid_generator = DataGenerator(path_valid, option, batch_size, True)
# BUILD MODEL: convolutional autoencoder for (260, 128, 1) inputs
# (presumably spectrogram-like frames from the commented-out DataGenerator —
# confirm against the generator's output shape).
max_norm_value = 100.0  # NOTE(review): norm 100 is effectively unconstrained; lower to actually clip kernels

model = Sequential([
    InputLayer(input_shape=(260, 128, 1)),
    BatchNormalization(),
    # --- Encoder: 4 x (conv 3x3 -> LeakyReLU -> 2x2 max-pool) ---
    # Spatial dims: 260x128 -> 130x64 -> 65x32 -> 32x16 (65 floors to 32) -> 16x8.
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv1"),
    LeakyReLU(),
    Dropout(0.3, name="enc_drop1"),
    MaxPooling2D(pool_size=(2, 2), name="enc_pool1"),
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv2"),
    LeakyReLU(),
    MaxPooling2D(pool_size=(2, 2), name="enc_pool2"),
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv3"),
    LeakyReLU(),
    MaxPooling2D(pool_size=(2, 2), name="enc_pool3"),
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv4"),
    LeakyReLU(),
    MaxPooling2D(pool_size=(2, 2), name="enc_pool4"),
    # --- Bottleneck: 16*8*32 = 4096 features -> dense 4352 = 17*8*32 ---
    Flatten(),
    Dense(units=4352),
    Reshape((17, 8, 32)),
    # --- Decoder: 4 x (2x2 upsample -> conv 3x3 -> LeakyReLU) ---
    # Spatial dims: 17x8 -> 34x16 -> 68x32 -> 136x64 -> 272x128.
    UpSampling2D(size=(2, 2), name="dec_up1"),
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv1"),
    LeakyReLU(),
    UpSampling2D(size=(2, 2), name="dec_up2"),
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv2"),
    LeakyReLU(),
    UpSampling2D(size=(2, 2), name="dec_up3"),
    Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv3"),
    LeakyReLU(),
    Dropout(0.3, name="dec_drop1"),
    UpSampling2D(size=(2, 2), name="dec_up4"),
    Conv2D(filters=1, kernel_size=(3, 3), strides=1, padding="same", activation="linear", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv4"),
    # BUGFIX: after four 2x2 upsamplings the height is 17*16 = 272, but the
    # input height is 260, so autoencoder training (fit(x, x)) would fail with
    # a target/output shape mismatch. Crop 6 rows top and bottom (272 - 12 = 260)
    # so the output is (260, 128, 1), matching the input.
    Cropping2D(cropping=((6, 6), (0, 0)), name="dec_crop"),
])

# MAPE reconstruction loss; 'accuracy' is not meaningful for regression but is
# kept so logged metric names stay unchanged for any downstream consumers.
model.compile(loss='mape', optimizer='adam', metrics=['mse', 'accuracy'])
model.summary()