    def skip_autoencoder(path_train, path_valid, batch_size, epochs, path_save_model, path_weights_model, weight, plot_name, option, reduction_divisor):
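        """Train a convolutional skip-connection autoencoder.

        path_train / path_valid   -- directories containing the training / validation files
        batch_size, epochs        -- training hyperparameters
        path_save_model           -- path under which the trained model is saved
        path_weights_model        -- directory for the per-epoch checkpoint weights
        weight                    -- path to pretrained weights loaded before training
        plot_name                 -- base name for the saved history and the loss plot
        option                    -- passed through to the custom DataGenerator
        reduction_divisor         -- passed to the DataGenerator and used to scale the
                                     number of steps per epoch
        """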
    
        # initializing
    
    
        #!/usr/bin/env python3
        # -*- coding: utf8 -*-
        import ctypes
        import os
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
        os.environ['TF_ENABLE_XLA'] = '1'
        os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
        os.environ['TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32'] = '1'
        os.environ['TF_DISABLE_CUDNN_TENSOR_OP_MATH'] = '1'
        os.environ['TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32'] = '1'
    
        import numpy as np
        import tensorflow as tf
        from tensorflow import keras
        from tensorflow.keras.layers import Add, Input, Dense, Flatten, Dropout, Conv2D, MaxPooling2D, Conv2DTranspose, LeakyReLU, Reshape, Activation, BatchNormalization, UpSampling2D
        from tensorflow.keras.models import Model
        from tensorflow.keras.constraints import max_norm
        from tensorflow.keras.optimizers import Adam, SGD
        from tensorflow.keras.utils import to_categorical, normalize
        from tensorflow.keras.callbacks import ModelCheckpoint
        from tensorflow.keras import losses
        from tensorflow.keras import backend as K
        import matplotlib.pyplot as plt
        import random
    
    
        # search for GPUs and, if one is found, cap its memory usage at 4096 MB
    
    
        gpus = tf.config.experimental.list_physical_devices('GPU')
        if gpus:
            try:
                tf.config.experimental.set_virtual_device_configuration(gpus[0], [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
            except RuntimeError as e:
                print(e)
    
    
    
        # custom generator import
        from Generators.DataGenerator_framelength import DataGenerator
    
        def count_files(directory):
            # count the files in a directory (used to size the data generators)
            return sum(1 for entry in os.scandir(directory) if entry.is_file())
        len_train = count_files(path_train)
        len_valid = count_files(path_valid)
    
        # train = os.listdir(path_train)
        # train = sorted(train,key=lambda x: (int(x.split("_")[1])))
        # valid = os.listdir(path_valid)
        # valid = sorted(valid,key=lambda x: (int(x.split("_")[1])))
    
        # data generators for training and validation
    
        train_generator = DataGenerator(path_train, option, reduction_divisor, len_train, batch_size, True)
        valid_generator = DataGenerator(path_valid, option, reduction_divisor, len_valid, batch_size, True)
    
    
        # building model
    
    
        def build_model():
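            """Build the skip-connection autoencoder.

            Encoder: two (1,3) 'valid' convolutions collapse the 5 feature columns
            to 1, followed by (16,1) convolutions along the 260-step time axis with
            two (2,1) max-poolings. Decoder: transposed (16,1) convolutions and
            (2,1) upsamplings restore the time axis, with Add() skip connections to
            the matching encoder stages (skip_1 to skip_3). A final (16,1)
            convolution with linear activation yields the single-channel output.
            """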
    
            max_norm_value = 100.0
    
            # define the input (260 x 5, single channel) and batch-normalize it
    
            input = Input(shape=(260,5,1))
    
            normalized = BatchNormalization()(input)
    
            # conv_0 = Conv2D(filters=32, kernel_size=(1,11), strides=1, padding='valid', kernel_constraint=max_norm(max_norm_value), kernel_initializer='he_normal')(normalized)
    
            # leakyrelu_0 = LeakyReLU()(conv_0)
    
            conv_1 = Conv2D(filters=32, kernel_size=(1,3),strides=1,padding="valid",kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(normalized)
    
            leakyrelu_1 = LeakyReLU()(conv_1)
            conv_2 = Conv2D(filters=32, kernel_size=(1,3),strides=1,padding="valid",kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(leakyrelu_1)
            leakyrelu_2 = LeakyReLU()(conv_2)
            conv_3 = Conv2D(filters=32, kernel_size=(16,1),strides=1,padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(leakyrelu_2)
            leakyrelu_3 = LeakyReLU()(conv_3)
            maxpool_1 = MaxPooling2D(pool_size=(2,1))(leakyrelu_3)
            conv_4 = Conv2D(filters=32, kernel_size=(16,1),strides=1,padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(maxpool_1)
            leakyrelu_4 = LeakyReLU()(conv_4)
            maxpool_2 = MaxPooling2D(pool_size=(2,1))(leakyrelu_4)
            conv_5 = Conv2D(filters=32, kernel_size=(16,1),strides=1,padding="valid", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(maxpool_2)
            leakyrelu_5 = LeakyReLU()(conv_5)
            conv_6 = Conv2D(filters=32, kernel_size=(16,1),strides=1,padding="valid", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(leakyrelu_5)
            leakyrelu_6 = LeakyReLU()(conv_6)
    
            convtrans_1 = Conv2DTranspose(filters=32, kernel_size=(16,1),strides=1,padding="valid", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(leakyrelu_6)
            leakyrelu_7 = LeakyReLU()(convtrans_1)
            convtrans_2 = Conv2DTranspose(filters=32, kernel_size=(16,1),strides=1, padding="valid", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(leakyrelu_7)
            leakyrelu_8 = LeakyReLU()(convtrans_2)
            skip_1 = Add()([maxpool_2,leakyrelu_8])
            up_1 = UpSampling2D(size=(2,1))(skip_1)
            conv_7 = Conv2D(filters=32, kernel_size=(16,1), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(up_1)
            leakyrelu_9 = LeakyReLU()(conv_7)
            skip_2 = Add()([leakyrelu_4,leakyrelu_9])
            up_2 = UpSampling2D(size=(2,1))(skip_2)
            conv_8 = Conv2D(filters=32, kernel_size=(16,1), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(up_2)
            leakyrelu_10 = LeakyReLU()(conv_8)
            skip_3 = Add()([leakyrelu_3,leakyrelu_10])
            conv_9 = Conv2D(filters=1, kernel_size=(16,1),strides=1, padding="same", activation='linear', kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal")(skip_3)
    
    
            # define the model mapping the input to the reconstruction
    
    
            model = Model(inputs=input, outputs=conv_9)
            return model
    
    
        # building and compiling model
    
        model = build_model()
        model.compile(loss='mse', optimizer='adam', metrics=['mape','accuracy'])
    
        # load pretrained weights (path passed via `weight`) before training
        model.load_weights(weight)
    
    
        # making directory for training weights
    
        print(path_weights_model)
    
        if not os.path.exists(path_weights_model):
            try:
                os.mkdir(path_weights_model)
            except Exception as e:
    
                print("Konnte Ordner fuer Gewichte nicht erstellen" + str(e))
    
    
        # defining how weights are saved
    
    
        filepath = path_weights_model + "/weights-{epoch:04d}.hdf5"
    
        checkpoint = ModelCheckpoint(
            filepath,
            monitor='loss',
            verbose=0,
            save_best_only=True,
            mode='min'
        )
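        # note: save_best_only with monitor='loss' keeps the checkpoint with the best
        # *training* loss; monitoring 'val_loss' instead would select on validation loss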
        model.summary()
    
        # train model
    
        history = model.fit(
            train_generator,
            steps_per_epoch=int(np.floor(len_train // reduction_divisor / batch_size)),
            epochs=epochs,
            validation_data=valid_generator,
            validation_steps=int(np.floor(len_valid // reduction_divisor / batch_size)),
            callbacks=[checkpoint],
            use_multiprocessing=True
        )
    
        model.summary()
    
        # save model after training
    
        model.save(path_save_model)
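        # note: with TF2-era Keras the save format follows the file extension
        # (HDF5 for '.h5'/'.hdf5', otherwise a SavedModel directory)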
    
        # save history
    
        index = path_save_model.split("/")
        path_save_nomodel = '/'.join(index[0:-1])
    
        loss_history = history.history["loss"]
        mape_history = history.history["mape"]
        accuracy_history = history.history["accuracy"]
        val_loss_history = history.history["val_loss"]
        val_mape_history = history.history["val_mape"]
        val_accuracy_history = history.history["val_accuracy"]
    
        dict_history = {'loss':loss_history, 'mape':mape_history, 'accuracy':accuracy_history, 'val_loss':val_loss_history, 'val_mape':val_mape_history, 'val_accuracy':val_accuracy_history}
        np.save(path_save_nomodel + '/' + plot_name, dict_history)
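        # np.save appends ".npy" to the file name; to reload the history later
        # (a sketch, path assumed to mirror the save call above):
        #   hist = np.load(path_save_nomodel + '/' + plot_name + '.npy', allow_pickle=True).item()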
    
    
        # plot train and validation loss after training ends
    
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(history.history["loss"], label="train_loss")
        plt.plot(history.history["val_loss"], label="val_loss")
        plt.title("Training Loss and Accuracy")
        plt.xlabel("Epochs")
        plt.ylabel("Loss/Accuracy")
        plt.legend(loc="lower left")
        plt.savefig(path_save_nomodel + '/' + plot_name + '.png', dpi=400)
    
        plt.show()
    
        return
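
# Example invocation (a sketch only; the paths, option flag and reduction divisor are
# placeholders and depend on the data layout expected by the custom DataGenerator):
#
#   skip_autoencoder(
#       path_train="data/train",
#       path_valid="data/valid",
#       batch_size=32,
#       epochs=100,
#       path_save_model="models/skip_autoencoder.h5",
#       path_weights_model="models/weights",
#       weight="models/pretrained/weights-0001.hdf5",
#       plot_name="skip_autoencoder_history",
#       option=1,
#       reduction_divisor=1,
#   )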