Commit 808d3c2b authored by T.H.C. Heshan

Update model.py

parent 054d3337
@@ -102,16 +102,22 @@ norm_layer.adapt(data=train_spectrogram_ds.map(map_func=lambda spec, label: spec
 model = models.Sequential([
     layers.Input(shape=input_shape),
     # Downsample the input.
+    # This layer resizes the input to a target size of 32x32. It is commonly used to ensure a consistent input shape for subsequent layers.
     layers.Resizing(32, 32),
     # Normalize.
+    # This is the normalization layer that you previously adapted with the training data.
     norm_layer,
+    # This is a convolutional layer with 32 filters, a kernel size of 3x3, and ReLU activation. It performs convolutional operations on the input.
     layers.Conv2D(32, 3, activation='relu'),
     layers.Conv2D(64, 3, activation='relu'),
+    # This layer performs max pooling, which reduces the spatial dimensions of the input data.
     layers.MaxPooling2D(),
     layers.Dropout(0.25),
+    # This layer flattens the input data into a 1-dimensional vector, preparing it for the fully connected layers.
     layers.Flatten(),
     layers.Dense(128, activation='relu'),
     layers.Dropout(0.5),
+    # This is the output layer with num_labels units, representing the number of output classes in your classification problem.
     layers.Dense(num_labels),
 ])
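
For context, below is a minimal sketch of how a model like this is typically prepared, compiled, and trained. Only the adapt() call and the Sequential definition are visible in this diff; the Normalization layer construction, the Adam optimizer, the from_logits loss setting, the val_spectrogram_ds dataset name, and the epoch count are assumptions, not part of the commit.

import tensorflow as tf

# Assumed setup: the norm_layer referenced above is a Normalization layer
# adapted to the training spectrograms before the model is built.
norm_layer = tf.keras.layers.Normalization()
norm_layer.adapt(data=train_spectrogram_ds.map(map_func=lambda spec, label: spec))

# The final Dense(num_labels) layer has no softmax, so the model outputs raw
# logits and the loss is configured with from_logits=True.
model.compile(
    optimizer=tf.keras.optimizers.Adam(),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['accuracy'],
)

history = model.fit(
    train_spectrogram_ds,
    validation_data=val_spectrogram_ds,  # assumed name for the validation split
    epochs=10,
)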