Define and compile Model_CNN

PHOTO EMBED

Wed Aug 03 2022 12:35:48 GMT+0000 (Coordinated Universal Time)

Saved by @mnis00014

def get_model():
    """Build a 5-block CNN binary classifier for 256x256 RGB images.

    Architecture: five Conv2D(3x3, relu, 'same') + MaxPooling2D(2x2)
    stages with 16 -> 32 -> 64 -> 128 -> 256 filters, followed by
    Flatten -> Dense(512, relu) -> Dense(1, sigmoid).

    Returns:
        An uncompiled tf.keras.Model mapping (256, 256, 3) images to a
        single sigmoid probability.
    """
    # FIX: Conv2D/MaxPooling2D default to data_format='channels_last',
    # so the input must be (height, width, channels). The original
    # shape=(3, 256, 256) made the spatial dims (3, 256); the second
    # MaxPooling2D then reduces the height 3 -> 1 -> 0 and the model
    # fails to build. Use channels-last ordering instead.
    inputs = tf.keras.layers.Input(shape=(256, 256, 3), name="input_layer")

    # 1st convolution block
    x = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)

    # 2nd convolution block
    x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(x)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)

    # 3rd convolution block
    x = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(x)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)

    # 4th convolution block
    x = tf.keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding='same')(x)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)

    # 5th convolution block
    x = tf.keras.layers.Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')(x)
    x = tf.keras.layers.MaxPooling2D((2, 2))(x)

    # Fully connected head. Qualified as tf.keras.layers.Flatten for
    # consistency with every other layer call (the original used a bare
    # `Flatten` name that relied on an import not visible in this chunk).
    x = tf.keras.layers.Flatten()(x)
    x = tf.keras.layers.Dense(512, activation='relu')(x)

    # Output layer: one sigmoid unit -> binary classification probability
    outputs = tf.keras.layers.Dense(1, activation="sigmoid", name="output_layer")(x)

    # Combine the inputs with the outputs into a model
    model = tf.keras.Model(inputs, outputs)

    return model
  


-----

# Reset Keras global state so layer names/counters start fresh before
# rebuilding the model.
tf.keras.backend.clear_session()

model = get_model()

# compile model
# NOTE(review): the `decay` kwarg of Adam was deprecated and later removed
# in newer TF releases (2.11+) — confirm the target TF version accepts it.
# NOTE(review): learning_rate=0.001 set here is overridden every epoch by
# the LearningRateScheduler callback below (which starts from INIT_LR=3e-4).
# 'Recall'/'Precision' metric strings resolve to the corresponding
# tf.keras.metrics classes.
model.compile(loss='binary_crossentropy',
              optimizer=tf.keras.optimizers.Adam(learning_rate = 0.001, decay=1e-6),
              metrics=['accuracy', 'Recall', 'Precision'])

# make directory for logs
# (model_name is defined elsewhere in the notebook — not visible here)
logdir = os.path.join('logs', model_name)
#os.mkdir(logdir)

from math import floor

# Hyperparameters.
# NOTE(review): N_FLODS looks like a typo for N_FOLDS; it is unused in this
# chunk, so it is left unrenamed in case another cell references it.
N_FLODS = 5
INIT_LR = 3e-4      # starting learning rate for the step-decay schedule
T_BS = 16           # presumably the training batch size — unused in this chunk
V_BS = 16           # presumably the validation batch size — unused in this chunk
decay_rate = 0.95   # multiplicative LR decay applied every `decay_step` epochs
decay_step = 1

# early stopping: halt when val_loss stops improving for PATIENCE epochs
# (PATIENCE is defined elsewhere) and restore the best weights seen
cp = EarlyStopping(monitor ='val_loss', mode = 'min', verbose = 2, patience = PATIENCE, restore_best_weights=True)

# checkpoint only the best model (lowest val_loss) to `model_name`
mc = ModelCheckpoint(model_name, monitor = 'val_loss', mode = 'min', verbose = 2, save_best_only = True)

# TensorBoard event logging into logdir
tsb = TensorBoard(log_dir=logdir)

# step decay: lr(epoch) = INIT_LR * decay_rate ** floor(epoch / decay_step)
lrs = LearningRateScheduler(lambda epoch : INIT_LR * pow(decay_rate, floor(epoch / decay_step)))


# training
# training: time the full fit run
start = timer()

# Fit the model.
# FIX: the original line read `history_**= model.fit(...)`, which is a
# syntax error (a paste/formatting artifact); renamed to the valid
# identifier `history`. train_g / val_g are the data generators defined
# elsewhere in the notebook; EarlyStopping may end training before the
# 100-epoch cap.
history = model.fit(train_g,
                    epochs=100,
                    steps_per_epoch=len(train_g),
                    validation_data=val_g,
                    validation_steps=len(val_g),
                    callbacks=[cp, mc, tsb, lrs])


end = timer()
elapsed = end - start
print('Total Time Elapsed: ', int(elapsed//60), ' minutes ', (round(elapsed%60)), ' seconds')
                                  
--
                                  
# Evaluate on the validation generator and plot the training curves.
# FIX: `history_**` was a syntax error (paste artifact); use the History
# object returned by model.fit, named `history`.
model.evaluate(val_g, verbose = 2, workers = 7)
plot_performance_single(history)

---
  
# Print the layer-by-layer architecture, output shapes, and parameter counts.
model.summary()

---
  
# Classification report on the held-out test generator
# (cm_report is a project helper defined elsewhere in the notebook).
cm_report(test_g)

---
  
# Confusion matrix on the test generator (`cm` is a project helper
# defined elsewhere in the notebook).
cm(test_g)

--

# ROC curve / AUC for the single-sigmoid-output classifier
# (project helper defined elsewhere in the notebook).
roc_auc_single(test_g)

--
content_copyCOPY