def autoencoder_old(path_train, path_valid, batch_size, epochs, path_save, option):
    # TensorFlow runtime flags; these must be set before tensorflow is imported.
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # suppress TF INFO-level log messages
    os.environ['TF_ENABLE_XLA'] = '1'
    os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
    os.environ['TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32'] = '1'
    os.environ['TF_DISABLE_CUDNN_TENSOR_OP_MATH'] = '1'
    os.environ['TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32'] = '1'


    import numpy as np
    import tensorflow as tf

    # Cap GPU memory usage at 4096 MB so the device can be shared with other processes.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            tf.config.experimental.set_virtual_device_configuration(
                gpus[0],
                [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4096)])
        except RuntimeError as e:
            print(e)

    from keras.models import Sequential
    from keras.layers import InputLayer, Dense, Flatten, Dropout, Conv2D, MaxPooling2D, LeakyReLU, Reshape, BatchNormalization, UpSampling2D
    from keras.constraints import max_norm
    from keras.optimizers import Adam
    from keras.callbacks import ModelCheckpoint
    import matplotlib.pyplot as plt

    # custom data generator (project-local module)
    from DataGenerator import DataGenerator

    # GET DATA: list the preprocessed training and validation files, e.g.
    # path_train = '/media/aneumann/Harddisk/Datenbanken/PythonTest/test/Preprocessing/Train'
    # path_valid = '/media/aneumann/Harddisk/Datenbanken/PythonTest/test/Preprocessing/Validation'
    train = sorted(os.listdir(path_train))
    valid = sorted(os.listdir(path_valid))


    train_generator = DataGenerator(path_train, option, batch_size, True)
    valid_generator = DataGenerator(path_valid, option, batch_size, True)
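    # Note: DataGenerator (imported above) is assumed to yield (X, y) batches of
    # shape (batch_size, 256, 128, 1) that match the model input below; the final
    # True argument presumably enables shuffling.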

    # BUILD MODEL

    max_norm_value = 100.0
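    # max_norm(max_norm_value) caps the L2 norm of each kernel's incoming weight
    # vector at 100, acting as a very loose weight clamp.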
    model = Sequential([
        # InputLayer(input_shape=(256,128,1)),
        # BatchNormalization(axis=1),
        Conv2D(filters=32, kernel_size=(3,3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", input_shape=(256, 128, 1), name="enc_conv1"),
        LeakyReLU(),
        # BatchNormalization(),
        Dropout(0.3, name="enc_drop1"),
        MaxPooling2D(pool_size=(2,2), name="enc_pool1"),
        Conv2D(filters=32, kernel_size=(3,3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv2"),
        LeakyReLU(),
        MaxPooling2D(pool_size=(2,2), name="enc_pool2"),
        Conv2D(filters=32, kernel_size=(3,3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv3"),
        LeakyReLU(),
        MaxPooling2D(pool_size=(2,2), name="enc_pool3"),
        Conv2D(filters=32, kernel_size=(3,3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="enc_conv4"),
        LeakyReLU(),
        MaxPooling2D(pool_size=(2,2), name="enc_pool4"),
        Flatten(),
        Dense(units = 8192),
        Reshape((32,16,16)),
        UpSampling2D(size=(2,2), name="dec_up1"),
        Conv2D(filters=32, kernel_size=(3,3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv1"),
        LeakyReLU(),
        UpSampling2D(size=(2,2), name="dec_up2"),
        Conv2D(filters=32, kernel_size=(3,3), strides=1, padding="same", kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv2"),
        LeakyReLU(),
        Dropout(0.3, name="dec_drop1"),
        UpSampling2D(size=(2,2), name="dec_up3"),
        Conv2D(filters=1, kernel_size=(3,3), strides=1, padding="same", activation='linear', kernel_constraint=max_norm(max_norm_value), kernel_initializer="he_normal", name="dec_conv3")
    ])
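    # Tensor shape trace (channels last):
    #   (256,128,1) -> 4x [Conv2D + MaxPooling] -> (16,8,32) -> Flatten (4096)
    #   -> Dense (8192) -> Reshape (32,16,16) -> 3x [UpSampling + Conv2D]
    #   -> (256,128,1), so the output matches the input dimensions.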
    # opt = Adam(lr=0.000001)
    # MAPE reconstruction loss; accuracy is not meaningful for this regression
    # task, so only MSE is tracked as an additional metric.
    model.compile(loss='mape', optimizer='adam', metrics=['mse'])

    # TRAIN MODEL

    # Create the directory for checkpoint weights if it does not exist yet.
    if not os.path.exists("weights_cnn"):
        try:
            os.mkdir("weights_cnn")
        except Exception as e:
            print("Could not create directory for weights: " + str(e))

    filepath = "weights_cnn/weights-{epoch:02d}-{loss:.4f}.hdf5"
    checkpoint = ModelCheckpoint(
        filepath,
        monitor='loss',
        verbose=0,
        save_best_only=True,
        mode='min'
    )
    model.summary()
    history = model.fit(
        train_generator,
        steps_per_epoch=int(np.floor(len(train) / batch_size)),
        epochs=epochs,
        validation_data=valid_generator,
        validation_steps=int(np.floor(len(valid) / batch_size)),
        callbacks=[checkpoint],
        use_multiprocessing=True
    )
    # Persist the complete model (architecture + weights) in HDF5 format.
    model.save(path_save + '/cnn_base_whole.h5')

    # Plot training and validation loss per epoch.
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(history.history["loss"], label="train_loss")
    plt.plot(history.history["val_loss"], label="val_loss")
    plt.title("Training and Validation Loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend(loc="lower left")
    plt.savefig(path_save + '/testrun.png', dpi=400)

    plt.show()
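

# Example invocation (illustrative sketch: the paths are the example dataset
# locations noted in the function above, while batch_size, epochs and `option`
# are placeholder values, since their real settings are not shown here).
if __name__ == '__main__':
    autoencoder_old(
        path_train='/media/aneumann/Harddisk/Datenbanken/PythonTest/test/Preprocessing/Train',
        path_valid='/media/aneumann/Harddisk/Datenbanken/PythonTest/test/Preprocessing/Validation',
        batch_size=16,    # placeholder batch size
        epochs=50,        # placeholder epoch count
        path_save='.',    # save model and loss plot to the current directory
        option=None,      # placeholder; forwarded unchanged to DataGenerator
    )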